xref: /titanic_50/usr/src/uts/common/io/gld.c (revision 2df1fe9ca32bb227b9158c67f5c00b54c20b10fd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * gld - Generic LAN Driver Version 2, PSARC/1997/382
30  *
31  * This is a utility module that provides generic facilities for
32  * LAN	drivers.  The DLPI protocol and most STREAMS interfaces
33  * are handled here.
34  *
35  * It no longer provides compatibility with drivers
36  * implemented according to the GLD v0 documentation published
37  * in 1993. (See PSARC 2003/728)
38  */
39 
40 
41 #include <sys/types.h>
42 #include <sys/errno.h>
43 #include <sys/stropts.h>
44 #include <sys/stream.h>
45 #include <sys/kmem.h>
46 #include <sys/stat.h>
47 #include <sys/modctl.h>
48 #include <sys/kstat.h>
49 #include <sys/debug.h>
50 #include <sys/note.h>
51 #include <sys/sysmacros.h>
52 
53 #include <sys/byteorder.h>
54 #include <sys/strsun.h>
55 #include <sys/strsubr.h>
56 #include <sys/dlpi.h>
57 #include <sys/pattr.h>
58 #include <sys/ethernet.h>
59 #include <sys/ib/clients/ibd/ibd.h>
60 #include <sys/policy.h>
61 #include <sys/atomic.h>
62 
63 #include <sys/multidata.h>
64 #include <sys/gld.h>
65 #include <sys/gldpriv.h>
66 
67 #include <sys/ddi.h>
68 #include <sys/sunddi.h>
69 
70 /*
71  * Macros to increment statistics.
72  */
73 
74 /*
75  * Increase kstats.  Note that this operation is not atomic; it may only be
76  * used while GLDM_LOCK_HELD_WRITE(macinfo) is true.
77  */
78 #define	BUMP(stats, vstats, stat, delta)	do {			\
79 	((stats)->stat) += (delta);					\
80 	_NOTE(CONSTANTCONDITION)					\
81 	if ((vstats) != NULL)						\
82 		((struct gld_stats *)(vstats))->stat += (delta);	\
83 	_NOTE(CONSTANTCONDITION)					\
84 } while (0)
85 
86 #define	ATOMIC_BUMP_STAT(stat, delta)	do {			\
87 	_NOTE(CONSTANTCONDITION)				\
88 	if (sizeof ((stat)) == sizeof (uint32_t)) {		\
89 		atomic_add_32((uint32_t *)&(stat), (delta));	\
90 	_NOTE(CONSTANTCONDITION)				\
91 	} else if (sizeof ((stat)) == sizeof (uint64_t)) {	\
92 		atomic_add_64((uint64_t *)&(stat), (delta));	\
93 	}							\
94 	_NOTE(CONSTANTCONDITION)				\
95 } while (0)
96 
97 #define	ATOMIC_BUMP(stats, vstats, stat, delta)	do {			\
98 	ATOMIC_BUMP_STAT((stats)->stat, (delta));			\
99 	_NOTE(CONSTANTCONDITION)					\
100 	if ((vstats) != NULL) {						\
101 		ATOMIC_BUMP_STAT(((struct gld_stats *)(vstats))->stat,	\
102 		    (delta));						\
103 	}								\
104 	_NOTE(CONSTANTCONDITION)					\
105 } while (0)
106 
107 #define	UPDATE_STATS(stats, vstats, pktinfo, delta) {			\
108 	if ((pktinfo).isBroadcast) {					\
109 		ATOMIC_BUMP((stats), (vstats),				\
110 		    glds_brdcstxmt, (delta));				\
111 	} else if ((pktinfo).isMulticast) {				\
112 		ATOMIC_BUMP((stats), (vstats), glds_multixmt, (delta));	\
113 	}								\
114 	ATOMIC_BUMP((stats), (vstats), glds_bytexmt64,			\
115 	    ((pktinfo).pktLen));					\
116 	ATOMIC_BUMP((stats), (vstats), glds_pktxmt64, (delta));		\
117 }
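/*
 * Illustrative use of the macros above (a sketch, not code copied from
 * elsewhere in this file; the variable names are hypothetical): a send
 * path that does not hold the mac lock as writer would account for one
 * transmitted packet with
 *
 *	UPDATE_STATS(mac_pvt->statistics, vlan->gldv_stats, pktinfo, 1);
 *
 * which atomically bumps the 64-bit packet and byte counters and, for
 * broadcast or multicast packets, the corresponding class counter.
 * BUMP() is the cheaper non-atomic form and is only safe while
 * GLDM_LOCK_HELD_WRITE(macinfo) is true.
 */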
118 
119 #ifdef GLD_DEBUG
120 int gld_debug = GLDERRS;
121 #endif
122 
123 /* called from gld_register */
124 static int gld_initstats(gld_mac_info_t *);
125 
126 /* called from kstat mechanism, and from wsrv's get_statistics */
127 static int gld_update_kstat(kstat_t *, int);
128 
129 /* statistics for additional vlans */
130 static int gld_init_vlan_stats(gld_vlan_t *);
131 static int gld_update_vlan_kstat(kstat_t *, int);
132 
133 /* called from gld_getinfo */
134 static dev_info_t *gld_finddevinfo(dev_t);
135 
136 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
137 /* also from the source routing stuff for sending RDE protocol packets */
138 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
139 static int gld_start_mdt(queue_t *, mblk_t *, int);
140 
141 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
142 static void gld_precv(gld_mac_info_t *, mblk_t *, uint32_t, struct gld_stats *);
143 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
144     pdesc_t *, pktinfo_t *);
145 
146 /* receive group: called from gld_recv and gld_precv* with maclock held */
147 static void gld_sendup(gld_mac_info_t *, pktinfo_t *, mblk_t *,
148     int (*)());
149 static int gld_accept(gld_t *, pktinfo_t *);
150 static int gld_mcmatch(gld_t *, pktinfo_t *);
151 static int gld_multicast(unsigned char *, gld_t *);
152 static int gld_paccept(gld_t *, pktinfo_t *);
153 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
154     void (*)(queue_t *, mblk_t *));
155 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *, boolean_t);
156 
157 /* wsrv group: called from wsrv, single threaded per queue */
158 static int gld_ioctl(queue_t *, mblk_t *);
159 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
160 static int gld_cmds(queue_t *, mblk_t *);
161 static mblk_t *gld_bindack(queue_t *, mblk_t *);
162 static int gld_notify_req(queue_t *, mblk_t *);
163 static int gld_udqos(queue_t *, mblk_t *);
164 static int gld_bind(queue_t *, mblk_t *);
165 static int gld_unbind(queue_t *, mblk_t *);
166 static int gld_inforeq(queue_t *, mblk_t *);
167 static int gld_unitdata(queue_t *, mblk_t *);
168 static int gldattach(queue_t *, mblk_t *);
169 static int gldunattach(queue_t *, mblk_t *);
170 static int gld_enable_multi(queue_t *, mblk_t *);
171 static int gld_disable_multi(queue_t *, mblk_t *);
172 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
173 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
174 static int gld_physaddr(queue_t *, mblk_t *);
175 static int gld_setaddr(queue_t *, mblk_t *);
176 static int gld_get_statistics(queue_t *, mblk_t *);
177 static int gld_cap(queue_t *, mblk_t *);
178 static int gld_cap_ack(queue_t *, mblk_t *);
179 static int gld_cap_enable(queue_t *, mblk_t *);
180 
181 /* misc utilities, some requiring various mutexes held */
182 static int gld_start_mac(gld_mac_info_t *);
183 static void gld_stop_mac(gld_mac_info_t *);
184 static void gld_set_ipq(gld_t *);
185 static void gld_flushqueue(queue_t *);
186 static glddev_t *gld_devlookup(int);
187 static int gld_findminor(glddev_t *);
188 static void gldinsque(void *, void *);
189 static void gldremque(void *);
190 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
191 void gld_bitreverse(uchar_t *, size_t);
192 char *gld_macaddr_sprintf(char *, unsigned char *, int);
193 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
194 static void gld_rem_vlan(gld_vlan_t *);
195 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
196 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
197 
198 #ifdef GLD_DEBUG
199 static void gld_check_assertions(void);
200 extern void gld_sr_dump(gld_mac_info_t *);
201 #endif
202 
203 /*
204  * Allocate and zero-out "number" structures each of type "structure" in
205  * kernel memory.
206  */
207 #define	GETSTRUCT(structure, number)   \
208 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
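/*
 * For example, gld_register() below calls GETSTRUCT(glddev_t, 1) to
 * allocate one zeroed glddev_t; since KM_NOSLEEP is used, the result
 * must be checked against NULL.
 */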
209 
210 #define	abs(a) ((a) < 0 ? -(a) : (a))
211 
212 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
213 
214 /*
215  * VLANs are only supported on ethernet devices that manipulate VLAN headers
216  * themselves.
217  */
218 #define	VLAN_CAPABLE(macinfo) \
219 	((macinfo)->gldm_type == DL_ETHER && \
220 	(macinfo)->gldm_send_tagged != NULL)
221 
222 /*
223  * The set of notifications generatable by GLD itself, the additional
224  * set that can be generated if the MAC driver provides the link-state
225  * tracking callback capability, and the set supported by the GLD
226  * notification code below.
227  *
228  * PLEASE keep these in sync with what the code actually does!
229  */
230 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
231 						DL_NOTE_PROMISC_OFF_PHYS |
232 						DL_NOTE_PHYS_ADDR;
233 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
234 						DL_NOTE_LINK_UP |
235 						DL_NOTE_SPEED;
236 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
237 						DL_NOTE_PROMISC_OFF_PHYS |
238 						DL_NOTE_PHYS_ADDR |
239 						DL_NOTE_LINK_DOWN |
240 						DL_NOTE_LINK_UP |
241 						DL_NOTE_SPEED;
242 
243 /* Media must correspond to #defines in gld.h */
244 static char *gld_media[] = {
245 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
246 	"aui",		/* GLDM_AUI */
247 	"bnc",		/* GLDM_BNC */
248 	"twpair",	/* GLDM_TP */
249 	"fiber",	/* GLDM_FIBER */
250 	"100baseT",	/* GLDM_100BT */
251 	"100vgAnyLan",	/* GLDM_VGANYLAN */
252 	"10baseT",	/* GLDM_10BT */
253 	"ring4",	/* GLDM_RING4 */
254 	"ring16",	/* GLDM_RING16 */
255 	"PHY/MII",	/* GLDM_PHYMII */
256 	"100baseTX",	/* GLDM_100BTX */
257 	"100baseT4",	/* GLDM_100BT4 */
258 	"unknown",	/* skip */
259 	"ipib",		/* GLDM_IB */
260 };
261 
262 /* Must correspond to #defines in gld.h */
263 static char *gld_duplex[] = {
264 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
265 	"half",		/* GLD_DUPLEX_HALF */
266 	"full"		/* GLD_DUPLEX_FULL */
267 };
268 
269 /*
270  * Interface types currently supported by GLD.
271  * If you add new types, you must check all "XXX" strings in the GLD source
272  * for implementation issues that may affect the support of your new type.
273  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
274  * require generalizing this GLD source to handle the new cases.  In other
275  * words there are assumptions built into the code in a few places that must
276  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
277  */
278 static gld_interface_t interfaces[] = {
279 
280 	/* Ethernet Bus */
281 	{
282 		DL_ETHER,
283 		(uint_t)-1,
284 		sizeof (struct ether_header),
285 		gld_interpret_ether,
286 		NULL,
287 		gld_fastpath_ether,
288 		gld_unitdata_ether,
289 		gld_init_ether,
290 		gld_uninit_ether,
291 		"ether"
292 	},
293 
294 	/* Fiber Distributed data interface */
295 	{
296 		DL_FDDI,
297 		4352,
298 		sizeof (struct fddi_mac_frm),
299 		gld_interpret_fddi,
300 		NULL,
301 		gld_fastpath_fddi,
302 		gld_unitdata_fddi,
303 		gld_init_fddi,
304 		gld_uninit_fddi,
305 		"fddi"
306 	},
307 
308 	/* Token Ring interface */
309 	{
310 		DL_TPR,
311 		17914,
312 		-1,			/* variable header size */
313 		gld_interpret_tr,
314 		NULL,
315 		gld_fastpath_tr,
316 		gld_unitdata_tr,
317 		gld_init_tr,
318 		gld_uninit_tr,
319 		"tpr"
320 	},
321 
322 	/* Infiniband */
323 	{
324 		DL_IB,
325 		4092,
326 		sizeof (struct ipoib_header),
327 		gld_interpret_ib,
328 		gld_interpret_mdt_ib,
329 		gld_fastpath_ib,
330 		gld_unitdata_ib,
331 		gld_init_ib,
332 		gld_uninit_ib,
333 		"ipib"
334 	},
335 };
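/*
 * gld_register() below walks this table looking for an entry whose
 * mac_type matches the driver's gldm_type; registration fails if none
 * matches, otherwise the entry's init/uninit, interpret, fastpath and
 * unitdata routines are used for that mac from then on.
 */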
336 
337 /*
338  * bit reversal lookup table.
339  */
340 static	uchar_t bit_rev[] = {
341 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
342 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
343 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
344 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
345 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
346 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
347 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
348 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
349 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
350 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
351 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
352 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
353 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
354 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
355 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
356 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
357 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
358 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
359 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
360 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
361 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
362 	0x3f, 0xbf, 0x7f, 0xff,
363 };
364 
365 /*
366  * User priorities, mapped from b_band.
367  */
368 static uint32_t user_priority[] = {
369 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
370 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
371 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
372 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
373 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
374 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
375 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
376 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
377 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
378 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
379 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
380 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
381 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
382 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
383 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
384 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
385 };
386 
387 #define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
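/*
 * In other words: a message with b_band 0 inherits the stream's default
 * priority (gld_upri), while a nonzero b_band maps to user priority
 * b_band / 32 (bands 1-31 yield 0, 32-63 yield 1, ..., 224-255 yield 7).
 */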
388 
389 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
390 
391 /*
392  * Module linkage information for the kernel.
393  */
394 
395 static struct modlmisc modlmisc = {
396 	&mod_miscops,		/* Type of module - a utility provider */
397 	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
398 #ifdef GLD_DEBUG
399 	" DEBUG"
400 #endif
401 };
402 
403 static struct modlinkage modlinkage = {
404 	MODREV_1, &modlmisc, NULL
405 };
406 
407 int
408 _init(void)
409 {
410 	int e;
411 
412 	/* initialize gld_device_list mutex */
413 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
414 
415 	/* initialize device driver (per-major) list */
416 	gld_device_list.gld_next =
417 	    gld_device_list.gld_prev = &gld_device_list;
418 
419 	if ((e = mod_install(&modlinkage)) != 0)
420 		mutex_destroy(&gld_device_list.gld_devlock);
421 
422 	return (e);
423 }
424 
425 int
426 _fini(void)
427 {
428 	int e;
429 
430 	if ((e = mod_remove(&modlinkage)) != 0)
431 		return (e);
432 
433 	ASSERT(gld_device_list.gld_next ==
434 	    (glddev_t *)&gld_device_list.gld_next);
435 	ASSERT(gld_device_list.gld_prev ==
436 	    (glddev_t *)&gld_device_list.gld_next);
437 	mutex_destroy(&gld_device_list.gld_devlock);
438 
439 	return (e);
440 }
441 
442 int
443 _info(struct modinfo *modinfop)
444 {
445 	return (mod_info(&modlinkage, modinfop));
446 }
447 
448 /*
449  * GLD service routines
450  */
451 
452 /* So this gld binary can be forward compatible with future v2 drivers */
453 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
454 
455 /*ARGSUSED*/
456 gld_mac_info_t *
457 gld_mac_alloc(dev_info_t *devinfo)
458 {
459 	gld_mac_info_t *macinfo;
460 
461 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
462 	    KM_SLEEP);
463 
464 	/*
465 	 * The setting of gldm_driver_version will not be documented or allowed
466 	 * until a future release.
467 	 */
468 	macinfo->gldm_driver_version = GLD_VERSION_200;
469 
470 	/*
471 	 * GLD's version.  This also is undocumented for now, but will be
472 	 * available if needed in the future.
473 	 */
474 	macinfo->gldm_GLD_version = GLD_VERSION;
475 
476 	return (macinfo);
477 }
478 
479 /*
480  * gld_mac_free must be called after the driver has removed interrupts
481  * and completely stopped calling gld_recv() and gld_sched().  At that
482  * point the interrupt routine is guaranteed by the system to have been
483  * exited and the maclock is no longer needed.  Of course, it is
484  * expected (required) that (assuming gld_register() succeeded),
485  * gld_unregister() was called before gld_mac_free().
486  */
487 void
488 gld_mac_free(gld_mac_info_t *macinfo)
489 {
490 	ASSERT(macinfo);
491 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
492 
493 	/*
494 	 * Assert that if we made it through gld_register, then we must
495 	 * have unregistered.
496 	 */
497 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
498 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
499 
500 	GLDM_LOCK_DESTROY(macinfo);
501 
502 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
503 }
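/*
 * Sketch of the teardown ordering described above, as it might appear
 * in a driver's detach routine (the xx_* names are hypothetical):
 *
 *	if (gld_unregister(macinfo) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	xx_remove_interrupts(xxp);	(no more gld_recv()/gld_sched())
 *	gld_mac_free(macinfo);
 */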
504 
505 /*
506  * gld_register -- called once per device instance (PPA)
507  *
508  * During its attach routine, a real device driver will register with GLD
509  * so that later opens and dl_attach_reqs will work.  The arguments are the
510  * devinfo pointer, the device name, and a macinfo structure describing the
511  * physical device instance.
512  */
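/*
 * For illustration only, a minimal attach-side sequence; the xx_* names
 * are hypothetical and only the entry points asserted below are shown:
 *
 *	macinfo = gld_mac_alloc(devinfo);
 *	macinfo->gldm_reset = xx_reset;
 *	macinfo->gldm_start = xx_start;
 *	macinfo->gldm_stop = xx_stop;
 *	macinfo->gldm_set_mac_addr = xx_set_mac_addr;
 *	macinfo->gldm_set_promiscuous = xx_set_promiscuous;
 *	macinfo->gldm_send = xx_send;
 *	(plus gldm_type, addrlen, saplen = -2, broadcast and vendor
 *	addresses, ident, min/max packet sizes, ppa = instance, ...)
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS) {
 *		gld_mac_free(macinfo);
 *		return (DDI_FAILURE);
 *	}
 */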
513 int
514 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
515 {
516 	int mediatype;
517 	int major = ddi_name_to_major(devname), i;
518 	glddev_t *glddev;
519 	gld_mac_pvt_t *mac_pvt;
520 	char minordev[32];
521 	char pbuf[3*GLD_MAX_ADDRLEN];
522 	gld_interface_t *ifp = NULL;
523 
524 	ASSERT(devinfo != NULL);
525 	ASSERT(macinfo != NULL);
526 
527 	if (macinfo->gldm_driver_version != GLD_VERSION)
528 		return (DDI_FAILURE);
529 
530 	mediatype = macinfo->gldm_type;
531 
532 	/*
533 	 * Entry points should be ready for us.
534 	 * ioctl is optional.
535 	 * set_multicast and get_stats are optional in v0.
536 	 * intr is only required if you add an interrupt.
537 	 */
538 	ASSERT(macinfo->gldm_reset != NULL);
539 	ASSERT(macinfo->gldm_start != NULL);
540 	ASSERT(macinfo->gldm_stop != NULL);
541 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
542 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
543 	ASSERT(macinfo->gldm_send != NULL);
544 
545 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
546 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
547 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
548 	ASSERT(macinfo->gldm_vendor_addr != NULL);
549 	ASSERT(macinfo->gldm_ident != NULL);
550 
551 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
552 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
553 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
554 		return (DDI_FAILURE);
555 	}
556 
557 	/*
558 	 * GLD only functions properly with saplen == -2
559 	 */
560 	if (macinfo->gldm_saplen != -2) {
561 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
562 		    "not supported", devname, macinfo->gldm_saplen);
563 		return (DDI_FAILURE);
564 	}
565 
566 	/* see gld_rsrv() */
567 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
568 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
569 
570 	mutex_enter(&gld_device_list.gld_devlock);
571 	glddev = gld_devlookup(major);
572 
573 	/*
574 	 *  Allocate per-driver (major) data structure if necessary
575 	 */
576 	if (glddev == NULL) {
577 		/* first occurrence of this device name (major number) */
578 		glddev = GETSTRUCT(glddev_t, 1);
579 		if (glddev == NULL) {
580 			mutex_exit(&gld_device_list.gld_devlock);
581 			return (DDI_FAILURE);
582 		}
583 		(void) strncpy(glddev->gld_name, devname,
584 		    sizeof (glddev->gld_name) - 1);
585 		glddev->gld_major = major;
586 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
587 		glddev->gld_mac_next = glddev->gld_mac_prev =
588 			(gld_mac_info_t *)&glddev->gld_mac_next;
589 		glddev->gld_str_next = glddev->gld_str_prev =
590 			(gld_t *)&glddev->gld_str_next;
591 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
592 
593 		/* allow increase of number of supported multicast addrs */
594 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
595 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
596 
597 		/*
598 		 * Optionally restrict DLPI provider style
599 		 *
600 		 * -1 - don't create style 1 nodes
601 		 * -2 - don't create style 2 nodes
602 		 */
603 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
604 		    "gld-provider-styles", 0);
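		/*
		 * For example (assumed driver.conf syntax, shown only as
		 * an illustration):
		 *
		 *	gld-provider-styles=-2;
		 *
		 * would suppress creation of the style 2 clone node below.
		 */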
605 
606 		/* Stuff that's needed before any PPA gets attached */
607 		glddev->gld_type = macinfo->gldm_type;
608 		glddev->gld_minsdu = macinfo->gldm_minpkt;
609 		glddev->gld_saplen = macinfo->gldm_saplen;
610 		glddev->gld_addrlen = macinfo->gldm_addrlen;
611 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
612 		    KM_SLEEP);
613 		bcopy(macinfo->gldm_broadcast_addr,
614 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
615 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
616 		gldinsque(glddev, gld_device_list.gld_prev);
617 	}
618 	glddev->gld_ndevice++;
619 	/* Now glddev can't go away until we unregister this mac (or fail) */
620 	mutex_exit(&gld_device_list.gld_devlock);
621 
622 	/*
623 	 *  Per-instance initialization
624 	 */
625 
626 	/*
627 	 * Initialize per-mac structure that is private to GLD.
628 	 * Set up interface pointer. These are device class specific pointers
629 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
630 	 */
631 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
632 		if (mediatype != interfaces[i].mac_type)
633 			continue;
634 
635 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
636 		    KM_SLEEP);
637 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
638 		    &interfaces[i];
639 		break;
640 	}
641 
642 	if (ifp == NULL) {
643 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
644 		    "of type %d", devname, mediatype);
645 		goto failure;
646 	}
647 
648 	/*
649 	 * Driver can only register MTU within legal media range.
650 	 */
651 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
652 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
653 		    devname);
654 		goto failure;
655 	}
656 
657 	/*
658 	 * For now, only Infiniband drivers can use MDT. Do not add
659 	 * support for Ethernet, FDDI or TR.
660 	 */
661 	if (macinfo->gldm_mdt_pre != NULL) {
662 		if (mediatype != DL_IB) {
663 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
664 			    "driver of type %d", devname, mediatype);
665 			goto failure;
666 		}
667 
668 		/*
669 		 * Validate entry points.
670 		 */
671 		if ((macinfo->gldm_mdt_send == NULL) ||
672 		    (macinfo->gldm_mdt_post == NULL)) {
673 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
674 			    "%s driver of type %d", devname, mediatype);
675 			goto failure;
676 		}
677 		macinfo->gldm_options |= GLDOPT_MDT;
678 	}
679 
680 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
681 	mac_pvt->major_dev = glddev;
682 
683 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
684 	/*
685 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
686 	 * format or in wire format?  Also gldm_broadcast.  For now
687 	 * we are assuming canonical, but I'm not sure that makes the
688 	 * most sense for ease of driver implementation.
689 	 */
690 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
691 	    macinfo->gldm_addrlen);
692 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
693 
694 	/*
695 	 * The available set of notifications is those generatable by GLD
696 	 * itself, plus those corresponding to the capabilities of the MAC
697 	 * driver, intersected with those supported by gld_notify_ind() above.
698 	 */
699 	mac_pvt->notifications = gld_internal_notes;
700 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
701 		mac_pvt->notifications |= gld_linkstate_notes;
702 	mac_pvt->notifications &= gld_supported_notes;
703 
704 	GLDM_LOCK_INIT(macinfo);
705 
706 	ddi_set_driver_private(devinfo, macinfo);
707 
708 	/*
709 	 * Now atomically get a PPA and put ourselves on the mac list.
710 	 */
711 	mutex_enter(&glddev->gld_devlock);
712 
713 #ifdef DEBUG
714 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
715 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
716 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
717 		    macinfo->gldm_ppa);
718 #endif
719 
720 	/*
721 	 * Create style 2 node (gated by gld-provider-styles property).
722 	 *
723 	 * NOTE: When the CLONE_DEV flag is specified to
724 	 *	 ddi_create_minor_node() the minor number argument is
725 	 *	 immaterial. Opens of that node will go via the clone
726 	 *	 driver and gld_open() will always be passed a dev_t with
727 	 *	 minor of zero.
728 	 */
729 	if (glddev->gld_styles != -2) {
730 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
731 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
732 			mutex_exit(&glddev->gld_devlock);
733 			goto late_failure;
734 		}
735 	}
736 
737 	/*
738 	 * Create style 1 node (gated by gld-provider-styles property)
739 	 */
740 	if (glddev->gld_styles != -1) {
741 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
742 		    macinfo->gldm_ppa);
743 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
744 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
745 		    0) != DDI_SUCCESS) {
746 			mutex_exit(&glddev->gld_devlock);
747 			goto late_failure;
748 		}
749 	}
750 
751 	/* add ourselves to this major device's linked list of instances */
752 	gldinsque(macinfo, glddev->gld_mac_prev);
753 
754 	mutex_exit(&glddev->gld_devlock);
755 
756 	/*
757 	 * Unfortunately we need the ppa before we call gld_initstats();
758 	 * otherwise we would do this just above the mutex_enter above, in
759 	 * which case we could have set MAC_READY inside the mutex and would
760 	 * not have needed to check it in open and DL_ATTACH.  We cannot do
761 	 * the initstats/kstat_create inside the mutex because the mutex may
762 	 * be taken in our kstat_update routine, causing a deadlock with
763 	 * kstat_chain_lock.
764 	 */
765 
766 	/* gld_initstats() calls (*ifp->init)() */
767 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
768 		mutex_enter(&glddev->gld_devlock);
769 		gldremque(macinfo);
770 		mutex_exit(&glddev->gld_devlock);
771 		goto late_failure;
772 	}
773 
774 	/*
775 	 * Need to indicate we are NOW ready to process interrupts;
776 	 * any interrupt before this is set is for someone else.
777 	 * This flag is also used to tell open et al. that this
778 	 * mac is now fully ready and available for use.
779 	 */
780 	GLDM_LOCK(macinfo, RW_WRITER);
781 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
782 	GLDM_UNLOCK(macinfo);
783 
784 	/* log local ethernet address -- XXX not DDI compliant */
785 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
786 		(void) localetheraddr(
787 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
788 
789 	/* now put announcement into the message buffer */
790 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
791 	    glddev->gld_name,
792 	    macinfo->gldm_ppa, macinfo->gldm_ident,
793 	    mac_pvt->interfacep->mac_string,
794 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
795 	    macinfo->gldm_addrlen));
796 
797 	ddi_report_dev(devinfo);
798 	return (DDI_SUCCESS);
799 
800 late_failure:
801 	ddi_remove_minor_node(devinfo, NULL);
802 	GLDM_LOCK_DESTROY(macinfo);
803 	if (mac_pvt->curr_macaddr != NULL)
804 	    kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
805 	if (mac_pvt->statistics != NULL)
806 	    kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
807 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
808 	macinfo->gldm_mac_pvt = NULL;
809 
810 failure:
811 	mutex_enter(&gld_device_list.gld_devlock);
812 	glddev->gld_ndevice--;
813 	/*
814 	 * Note that just because this goes to zero here does not necessarily
815 	 * mean that we were the one who added the glddev above.  It's
816 	 * possible that the first mac unattached while we were in here
817 	 * failing to attach the second mac.  But we're now the last.
818 	 */
819 	if (glddev->gld_ndevice == 0) {
820 		/* There should be no macinfos left */
821 		ASSERT(glddev->gld_mac_next ==
822 		    (gld_mac_info_t *)&glddev->gld_mac_next);
823 		ASSERT(glddev->gld_mac_prev ==
824 		    (gld_mac_info_t *)&glddev->gld_mac_next);
825 
826 		/*
827 		 * There should be no DL_UNATTACHED streams: the system
828 		 * should not have detached the "first" devinfo which has
829 		 * all the open style 2 streams.
830 		 *
831 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
832 		 */
833 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
834 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
835 
836 		gldremque(glddev);
837 		mutex_destroy(&glddev->gld_devlock);
838 		if (glddev->gld_broadcast != NULL)
839 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
840 		kmem_free(glddev, sizeof (glddev_t));
841 	}
842 	mutex_exit(&gld_device_list.gld_devlock);
843 
844 	return (DDI_FAILURE);
845 }
846 
847 /*
848  * gld_unregister (macinfo)
849  * remove the macinfo structure from local structures
850  * this is cleanup for a driver to be unloaded
851  */
852 int
853 gld_unregister(gld_mac_info_t *macinfo)
854 {
855 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
856 	glddev_t *glddev = mac_pvt->major_dev;
857 	gld_interface_t *ifp;
858 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
859 
860 	mutex_enter(&glddev->gld_devlock);
861 	GLDM_LOCK(macinfo, RW_WRITER);
862 
863 	if (mac_pvt->nvlan > 0) {
864 		GLDM_UNLOCK(macinfo);
865 		mutex_exit(&glddev->gld_devlock);
866 		return (DDI_FAILURE);
867 	}
868 
869 #ifdef	GLD_DEBUG
870 	{
871 		int i;
872 
873 		for (i = 0; i < VLAN_HASHSZ; i++) {
874 			if ((mac_pvt->vlan_hash[i] != NULL))
875 				cmn_err(CE_PANIC,
876 				    "%s, line %d: "
877 				    "mac_pvt->vlan_hash[%d] != NULL",
878 				    __FILE__, __LINE__, i);
879 		}
880 	}
881 #endif
882 
883 	/* Delete this mac */
884 	gldremque(macinfo);
885 
886 	/* Disallow further entries to gld_recv() and gld_sched() */
887 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
888 
889 	GLDM_UNLOCK(macinfo);
890 	mutex_exit(&glddev->gld_devlock);
891 
892 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
893 	(*ifp->uninit)(macinfo);
894 
895 	ASSERT(mac_pvt->kstatp);
896 	kstat_delete(mac_pvt->kstatp);
897 
898 	ASSERT(GLDM_LOCK_INITED(macinfo));
899 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
900 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
901 
902 	if (mac_pvt->mcast_table != NULL)
903 		kmem_free(mac_pvt->mcast_table, multisize);
904 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
905 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
906 
907 	/* We now have one fewer instance for this major device */
908 	mutex_enter(&gld_device_list.gld_devlock);
909 	glddev->gld_ndevice--;
910 	if (glddev->gld_ndevice == 0) {
911 		/* There should be no macinfos left */
912 		ASSERT(glddev->gld_mac_next ==
913 		    (gld_mac_info_t *)&glddev->gld_mac_next);
914 		ASSERT(glddev->gld_mac_prev ==
915 		    (gld_mac_info_t *)&glddev->gld_mac_next);
916 
917 		/*
918 		 * There should be no DL_UNATTACHED streams: the system
919 		 * should not have detached the "first" devinfo which has
920 		 * all the open style 2 streams.
921 		 *
922 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
923 		 */
924 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
925 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
926 
927 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
928 		gldremque(glddev);
929 		mutex_destroy(&glddev->gld_devlock);
930 		if (glddev->gld_broadcast != NULL)
931 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
932 		kmem_free(glddev, sizeof (glddev_t));
933 	}
934 	mutex_exit(&gld_device_list.gld_devlock);
935 
936 	return (DDI_SUCCESS);
937 }
938 
939 /*
940  * gld_initstats
941  * called from gld_register
942  */
943 static int
944 gld_initstats(gld_mac_info_t *macinfo)
945 {
946 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
947 	struct gldkstats *sp;
948 	glddev_t *glddev;
949 	kstat_t *ksp;
950 	gld_interface_t *ifp;
951 
952 	glddev = mac_pvt->major_dev;
953 
954 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
955 	    NULL, "net", KSTAT_TYPE_NAMED,
956 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
957 		cmn_err(CE_WARN,
958 		    "GLD: failed to create kstat structure for %s%d",
959 		    glddev->gld_name, macinfo->gldm_ppa);
960 		return (GLD_FAILURE);
961 	}
962 	mac_pvt->kstatp = ksp;
963 
964 	ksp->ks_update = gld_update_kstat;
965 	ksp->ks_private = (void *)macinfo;
966 
967 	sp = ksp->ks_data;
968 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
969 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
970 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
971 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
972 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
973 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
974 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
975 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
976 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
977 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
978 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
979 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
984 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
985 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
986 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
987 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
988 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
990 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
991 
992 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
993 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
994 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
995 
996 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
997 	    KSTAT_DATA_UINT32);
998 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
999 	    KSTAT_DATA_UINT32);
1000 
1001 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1002 
1003 	(*ifp->init)(macinfo);
1004 
1005 	kstat_install(ksp);
1006 
1007 	return (GLD_SUCCESS);
1008 }
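/*
 * The named kstats created above are visible from userland via
 * kstat(1M), e.g. an invocation along the lines of
 *
 *	kstat -c net -m <drivername> -i <ppa>
 */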
1009 
1010 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1011 static int
1012 gld_update_kstat(kstat_t *ksp, int rw)
1013 {
1014 	gld_mac_info_t	*macinfo;
1015 	gld_mac_pvt_t	*mac_pvt;
1016 	struct gldkstats *gsp;
1017 	struct gld_stats *stats;
1018 
1019 	if (rw == KSTAT_WRITE)
1020 		return (EACCES);
1021 
1022 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1023 	ASSERT(macinfo != NULL);
1024 
1025 	GLDM_LOCK(macinfo, RW_WRITER);
1026 
1027 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1028 		GLDM_UNLOCK(macinfo);
1029 		return (EIO);	/* this one's not ready yet */
1030 	}
1031 
1032 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1033 		GLDM_UNLOCK(macinfo);
1034 		return (EIO);	/* this one's not ready any more */
1035 	}
1036 
1037 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1038 	gsp = mac_pvt->kstatp->ks_data;
1039 	ASSERT(gsp);
1040 	stats = mac_pvt->statistics;
1041 
1042 	if (macinfo->gldm_get_stats)
1043 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1044 
1045 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1046 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1047 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1048 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1049 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1050 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1051 
1052 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1053 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1054 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1055 
1056 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1057 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1058 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1059 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1060 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1061 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1062 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1063 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1064 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1065 	gsp->glds_missed.value.ul = stats->glds_missed;
1066 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1067 	    stats->glds_gldnorcvbuf;
1068 	gsp->glds_intr.value.ul = stats->glds_intr;
1069 
1070 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1071 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1072 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1073 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1074 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1075 
1076 	if (mac_pvt->nprom)
1077 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1078 	else if (mac_pvt->nprom_multi)
1079 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1080 	else
1081 		(void) strcpy(gsp->glds_prom.value.c, "off");
1082 
1083 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1084 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1085 	    ? stats->glds_media : 0]);
1086 
1087 	switch (macinfo->gldm_type) {
1088 	case DL_ETHER:
1089 		gsp->glds_frame.value.ul = stats->glds_frame;
1090 		gsp->glds_crc.value.ul = stats->glds_crc;
1091 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1092 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1093 		gsp->glds_defer.value.ul = stats->glds_defer;
1094 		gsp->glds_short.value.ul = stats->glds_short;
1095 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1096 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1097 		gsp->glds_dot3_first_coll.value.ui32 =
1098 		    stats->glds_dot3_first_coll;
1099 		gsp->glds_dot3_multi_coll.value.ui32 =
1100 		    stats->glds_dot3_multi_coll;
1101 		gsp->glds_dot3_sqe_error.value.ui32 =
1102 		    stats->glds_dot3_sqe_error;
1103 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1104 		    stats->glds_dot3_mac_xmt_error;
1105 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1106 		    stats->glds_dot3_mac_rcv_error;
1107 		gsp->glds_dot3_frame_too_long.value.ui32 =
1108 		    stats->glds_dot3_frame_too_long;
1109 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1110 		    stats->glds_duplex <
1111 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1112 		    stats->glds_duplex : 0]);
1113 		break;
1114 	case DL_TPR:
1115 		gsp->glds_dot5_line_error.value.ui32 =
1116 		    stats->glds_dot5_line_error;
1117 		gsp->glds_dot5_burst_error.value.ui32 =
1118 		    stats->glds_dot5_burst_error;
1119 		gsp->glds_dot5_signal_loss.value.ui32 =
1120 		    stats->glds_dot5_signal_loss;
1121 		gsp->glds_dot5_ace_error.value.ui32 =
1122 		    stats->glds_dot5_ace_error;
1123 		gsp->glds_dot5_internal_error.value.ui32 =
1124 		    stats->glds_dot5_internal_error;
1125 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1126 		    stats->glds_dot5_lost_frame_error;
1127 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1128 		    stats->glds_dot5_frame_copied_error;
1129 		gsp->glds_dot5_token_error.value.ui32 =
1130 		    stats->glds_dot5_token_error;
1131 		gsp->glds_dot5_freq_error.value.ui32 =
1132 		    stats->glds_dot5_freq_error;
1133 		break;
1134 	case DL_FDDI:
1135 		gsp->glds_fddi_mac_error.value.ui32 =
1136 		    stats->glds_fddi_mac_error;
1137 		gsp->glds_fddi_mac_lost.value.ui32 =
1138 		    stats->glds_fddi_mac_lost;
1139 		gsp->glds_fddi_mac_token.value.ui32 =
1140 		    stats->glds_fddi_mac_token;
1141 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1142 		    stats->glds_fddi_mac_tvx_expired;
1143 		gsp->glds_fddi_mac_late.value.ui32 =
1144 		    stats->glds_fddi_mac_late;
1145 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1146 		    stats->glds_fddi_mac_ring_op;
1147 		break;
1148 	case DL_IB:
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	GLDM_UNLOCK(macinfo);
1155 
1156 #ifdef GLD_DEBUG
1157 	gld_check_assertions();
1158 	if (gld_debug & GLDRDE)
1159 		gld_sr_dump(macinfo);
1160 #endif
1161 
1162 	return (0);
1163 }
1164 
1165 static int
1166 gld_init_vlan_stats(gld_vlan_t *vlan)
1167 {
1168 	gld_mac_info_t *mac = vlan->gldv_mac;
1169 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1170 	struct gldkstats *sp;
1171 	glddev_t *glddev;
1172 	kstat_t *ksp;
1173 	char *name;
1174 	int instance;
1175 
1176 	glddev = mac_pvt->major_dev;
1177 	name = glddev->gld_name;
1178 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1179 
1180 	if ((ksp = kstat_create(name, instance,
1181 	    NULL, "net", KSTAT_TYPE_NAMED,
1182 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1183 		cmn_err(CE_WARN,
1184 		    "GLD: failed to create kstat structure for %s%d",
1185 		    name, instance);
1186 		return (GLD_FAILURE);
1187 	}
1188 
1189 	vlan->gldv_kstatp = ksp;
1190 
1191 	ksp->ks_update = gld_update_vlan_kstat;
1192 	ksp->ks_private = (void *)vlan;
1193 
1194 	sp = ksp->ks_data;
1195 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1196 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1197 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1198 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1199 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1200 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1201 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1202 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1203 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1204 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1205 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1206 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1211 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1212 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1213 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1214 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1215 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1217 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1218 
1219 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1220 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1221 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1222 
1223 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1224 	    KSTAT_DATA_UINT32);
1225 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1226 	    KSTAT_DATA_UINT32);
1227 
1228 	kstat_install(ksp);
1229 	return (GLD_SUCCESS);
1230 }
1231 
1232 static int
1233 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1234 {
1235 	gld_vlan_t	*vlan;
1236 	gld_mac_info_t	*macinfo;
1237 	struct gldkstats *gsp;
1238 	struct gld_stats *stats;
1239 	gld_mac_pvt_t *mac_pvt;
1240 	uint32_t media;
1241 
1242 	if (rw == KSTAT_WRITE)
1243 		return (EACCES);
1244 
1245 	vlan = (gld_vlan_t *)ksp->ks_private;
1246 	ASSERT(vlan != NULL);
1247 
1248 	macinfo = vlan->gldv_mac;
1249 	GLDM_LOCK(macinfo, RW_WRITER);
1250 
1251 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1252 
1253 	gsp = vlan->gldv_kstatp->ks_data;
1254 	ASSERT(gsp);
1255 	stats = vlan->gldv_stats;
1256 
1257 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1258 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1259 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1260 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1261 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1262 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1263 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1264 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1265 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1266 
1267 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1268 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1269 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1270 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1271 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1272 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1273 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1274 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1275 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1276 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1277 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1278 
1279 	gsp->glds_speed.value.ui64 = mac_pvt->statistics->glds_speed;
1280 	media = mac_pvt->statistics->glds_media;
1281 	(void) strcpy(gsp->glds_media.value.c,
1282 	    gld_media[media < sizeof (gld_media) / sizeof (gld_media[0]) ?
1283 	    media : 0]);
1284 
1285 	GLDM_UNLOCK(macinfo);
1286 	return (0);
1287 }
1288 
1289 /*
1290  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1291  */
1292 /*ARGSUSED*/
1293 int
1294 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1295 {
1296 	dev_info_t	*devinfo;
1297 	minor_t		minor = getminor((dev_t)arg);
1298 	int		rc = DDI_FAILURE;
1299 
1300 	switch (cmd) {
1301 	case DDI_INFO_DEVT2DEVINFO:
1302 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1303 			*(dev_info_t **)resultp = devinfo;
1304 			rc = DDI_SUCCESS;
1305 		}
1306 		break;
1307 	case DDI_INFO_DEVT2INSTANCE:
1308 		/* Need static mapping for deferred attach */
1309 		if (minor == GLD_USE_STYLE2) {
1310 			/*
1311 			 * Style 2:  this minor number does not correspond to
1312 			 * any particular instance number.
1313 			 */
1314 			rc = DDI_FAILURE;
1315 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1316 			/* Style 1:  calculate the PPA from the minor */
1317 			*resultp = (void *)(uintptr_t)
1318 			    GLD_STYLE1_MINOR_TO_PPA(minor);
1319 			rc = DDI_SUCCESS;
1320 		} else {
1321 			/* Clone:  look for it.  Not a static mapping */
1322 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1323 				*resultp = (void *)(uintptr_t)
1324 				    ddi_get_instance(devinfo);
1325 				rc = DDI_SUCCESS;
1326 			}
1327 		}
1328 		break;
1329 	}
1330 
1331 	return (rc);
1332 }
1333 
1334 /* called from gld_getinfo */
1335 dev_info_t *
1336 gld_finddevinfo(dev_t dev)
1337 {
1338 	minor_t		minor = getminor(dev);
1339 	glddev_t	*device;
1340 	gld_mac_info_t	*mac;
1341 	gld_vlan_t	*vlan;
1342 	gld_t		*str;
1343 	dev_info_t	*devinfo = NULL;
1344 	int		i;
1345 
1346 	if (minor == GLD_USE_STYLE2) {
1347 		/*
1348 		 * Style 2:  this minor number does not correspond to
1349 		 * any particular instance number.
1350 		 *
1351 		 * XXX We don't know what to say.  See Bug 1165519.
1352 		 */
1353 		return (NULL);
1354 	}
1355 
1356 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1357 
1358 	device = gld_devlookup(getmajor(dev));
1359 	if (device == NULL) {
1360 		/* There are no attached instances of this device */
1361 		mutex_exit(&gld_device_list.gld_devlock);
1362 		return (NULL);
1363 	}
1364 
1365 	/*
1366 	 * Search all attached macs and streams.
1367 	 *
1368 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1369 	 * we don't know what devinfo we should report back even if we
1370 	 * found the minor.  Maybe we should associate streams that are
1371 	 * not currently attached to a PPA with the "first" devinfo node
1372 	 * of the major device to attach -- the one that created the
1373 	 * minor node for the generic device.
1374 	 */
1375 	mutex_enter(&device->gld_devlock);
1376 
1377 	for (mac = device->gld_mac_next;
1378 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1379 	    mac = mac->gldm_next) {
1380 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1381 
1382 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1383 			continue;	/* this one's not ready yet */
1384 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1385 			/* Style 1 -- look for the corresponding PPA */
1386 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1387 				devinfo = mac->gldm_devinfo;
1388 				goto out;	/* found it! */
1389 			} else
1390 				continue;	/* not this PPA */
1391 		}
1392 
1393 		/* We are looking for a clone */
1394 		for (i = 0; i < VLAN_HASHSZ; i++) {
1395 			for (vlan = pvt->vlan_hash[i];
1396 			    vlan != NULL; vlan = vlan->gldv_next) {
1397 				for (str = vlan->gldv_str_next;
1398 				    str != (gld_t *)&vlan->gldv_str_next;
1399 				    str = str->gld_next) {
1400 					ASSERT(str->gld_mac_info == mac);
1401 					if (minor == str->gld_minor) {
1402 						devinfo = mac->gldm_devinfo;
1403 						goto out;
1404 					}
1405 				}
1406 			}
1407 		}
1408 	}
1409 out:
1410 	mutex_exit(&device->gld_devlock);
1411 	mutex_exit(&gld_device_list.gld_devlock);
1412 	return (devinfo);
1413 }
1414 
1415 /*
1416  * STREAMS open routine.  The device dependent driver specifies this as its
1417  * open entry point.
1418  */
1419 /*ARGSUSED2*/
1420 int
1421 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1422 {
1423 	gld_mac_pvt_t *mac_pvt;
1424 	gld_t *gld;
1425 	glddev_t *glddev;
1426 	gld_mac_info_t *macinfo;
1427 	minor_t minor = getminor(*dev);
1428 	gld_vlan_t *vlan;
1429 	t_uscalar_t ppa;
1430 
1431 	ASSERT(q != NULL);
1432 
1433 	if (minor > GLD_MAX_STYLE1_MINOR)
1434 		return (ENXIO);
1435 
1436 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1437 
1438 	/* Find our per-major glddev_t structure */
1439 	mutex_enter(&gld_device_list.gld_devlock);
1440 	glddev = gld_devlookup(getmajor(*dev));
1441 
1442 	/*
1443 	 * This glddev will hang around since detach (and therefore
1444 	 * gld_unregister) can't run while we're here in the open routine.
1445 	 */
1446 	mutex_exit(&gld_device_list.gld_devlock);
1447 
1448 	if (glddev == NULL)
1449 		return (ENXIO);
1450 
1451 #ifdef GLD_DEBUG
1452 	if (gld_debug & GLDPROT) {
1453 		if (minor == GLD_USE_STYLE2)
1454 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1455 		else
1456 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1457 			    (void *)q, minor);
1458 	}
1459 #endif
1460 
1461 	/*
1462 	 * get a per-stream structure and link things together so we
1463 	 * can easily find them later.
1464 	 */
1465 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1466 
1467 	/*
1468 	 * fill in the structure and state info
1469 	 */
1470 	gld->gld_qptr = q;
1471 	gld->gld_device = glddev;
1472 	gld->gld_state = DL_UNATTACHED;
1473 
1474 	/*
1475 	 * we must atomically find a free minor number and add the stream
1476 	 * to a list, because gld_findminor has to traverse the lists to
1477 	 * determine which minor numbers are free.
1478 	 */
1479 	mutex_enter(&glddev->gld_devlock);
1480 
1481 	/* find a free minor device number for the clone */
1482 	gld->gld_minor = gld_findminor(glddev);
1483 	if (gld->gld_minor == 0) {
1484 		mutex_exit(&glddev->gld_devlock);
1485 		kmem_free(gld, sizeof (gld_t));
1486 		return (ENOSR);
1487 	}
1488 
1489 #ifdef GLD_VERBOSE_DEBUG
1490 	if (gld_debug & GLDPROT)
1491 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1492 		    (void *)gld, gld->gld_minor);
1493 #endif
1494 
1495 	if (minor == GLD_USE_STYLE2) {
1496 		gld->gld_style = DL_STYLE2;
1497 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1498 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1499 		gldinsque(gld, glddev->gld_str_prev);
1500 #ifdef GLD_VERBOSE_DEBUG
1501 		if (gld_debug & GLDPROT)
1502 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1503 #endif
1504 		(void) qassociate(q, -1);
1505 		goto done;
1506 	}
1507 
1508 	gld->gld_style = DL_STYLE1;
1509 
1510 	/* the PPA is actually 1 less than the minordev */
1511 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1512 
1513 	for (macinfo = glddev->gld_mac_next;
1514 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1515 	    macinfo = macinfo->gldm_next) {
1516 		ASSERT(macinfo != NULL);
1517 		if (macinfo->gldm_ppa != ppa)
1518 			continue;
1519 
1520 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1521 			continue;	/* this one's not ready yet */
1522 
1523 		/*
1524 		 * we found the correct PPA
1525 		 */
1526 		GLDM_LOCK(macinfo, RW_WRITER);
1527 
1528 		gld->gld_mac_info = macinfo;
1529 
1530 		if (macinfo->gldm_send_tagged != NULL)
1531 			gld->gld_send = macinfo->gldm_send_tagged;
1532 		else
1533 			gld->gld_send = macinfo->gldm_send;
1534 
1535 		/* now ready for action */
1536 		gld->gld_state = DL_UNBOUND;
1537 
1538 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1539 			GLDM_UNLOCK(macinfo);
1540 			mutex_exit(&glddev->gld_devlock);
1541 			kmem_free(gld, sizeof (gld_t));
1542 			return (EIO);
1543 		}
1544 
1545 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1546 		if (!mac_pvt->started) {
1547 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1548 				gld_rem_vlan(vlan);
1549 				GLDM_UNLOCK(macinfo);
1550 				mutex_exit(&glddev->gld_devlock);
1551 				kmem_free(gld, sizeof (gld_t));
1552 				return (EIO);
1553 			}
1554 		}
1555 
1556 		gld->gld_vlan = vlan;
1557 		vlan->gldv_nstreams++;
1558 		gldinsque(gld, vlan->gldv_str_prev);
1559 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1560 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1561 
1562 		GLDM_UNLOCK(macinfo);
1563 #ifdef GLD_VERBOSE_DEBUG
1564 		if (gld_debug & GLDPROT)
1565 			cmn_err(CE_NOTE,
1566 			    "GLDstruct added to instance list");
1567 #endif
1568 		break;
1569 	}
1570 
1571 	if (gld->gld_state == DL_UNATTACHED) {
1572 		mutex_exit(&glddev->gld_devlock);
1573 		kmem_free(gld, sizeof (gld_t));
1574 		return (ENXIO);
1575 	}
1576 
1577 done:
1578 	mutex_exit(&glddev->gld_devlock);
1579 	noenable(WR(q));	/* We'll do the qenables manually */
1580 	qprocson(q);		/* start the queues running */
1581 	qenable(WR(q));
1582 	return (0);
1583 }
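/*
 * Note on the two open styles handled above (device names illustrative):
 * a style 1 open of a node such as /dev/xx0 resolves the PPA from the
 * minor number right here, while a style 2 open of the clone node
 * /dev/xx leaves the stream DL_UNATTACHED until a DL_ATTACH_REQ selects
 * the PPA (see gldattach()).
 */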
1584 
1585 /*
1586  * normal stream close call checks current status and cleans up
1587  * data structures that were dynamically allocated
1588  */
1589 /*ARGSUSED1*/
1590 int
1591 gld_close(queue_t *q, int flag, cred_t *cred)
1592 {
1593 	gld_t	*gld = (gld_t *)q->q_ptr;
1594 	glddev_t *glddev = gld->gld_device;
1595 
1596 	ASSERT(q);
1597 	ASSERT(gld);
1598 
1599 #ifdef GLD_DEBUG
1600 	if (gld_debug & GLDPROT) {
1601 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1602 		    (void *)q, (gld->gld_style & 0x1) + 1);
1603 	}
1604 #endif
1605 
1606 	/* Hold all device streams lists still while we check for a macinfo */
1607 	mutex_enter(&glddev->gld_devlock);
1608 
1609 	if (gld->gld_mac_info != NULL) {
1610 		/* If there's a macinfo, block recv while we change state */
1611 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1612 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1613 		GLDM_UNLOCK(gld->gld_mac_info);
1614 	} else {
1615 		/* no mac DL_ATTACHED right now */
1616 		gld->gld_flags |= GLD_STR_CLOSING;
1617 	}
1618 
1619 	mutex_exit(&glddev->gld_devlock);
1620 
1621 	/*
1622 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1623 	 * we know wsrv isn't in there trying to undo what we're doing.
1624 	 */
1625 	qprocsoff(q);
1626 
1627 	ASSERT(gld->gld_wput_count == 0);
1628 	gld->gld_wput_count = 0;	/* just in case */
1629 
1630 	if (gld->gld_state == DL_IDLE) {
1631 		/* Need to unbind */
1632 		ASSERT(gld->gld_mac_info != NULL);
1633 		(void) gld_unbind(WR(q), NULL);
1634 	}
1635 
1636 	if (gld->gld_state == DL_UNBOUND) {
1637 		/*
1638 		 * Need to unattach
1639 		 * For a Style 2 stream, gldunattach also
1640 		 * associates the queue with a NULL dip.
1641 		 */
1642 		ASSERT(gld->gld_mac_info != NULL);
1643 		(void) gldunattach(WR(q), NULL);
1644 	}
1645 
1646 	/* disassociate the stream from the device */
1647 	q->q_ptr = WR(q)->q_ptr = NULL;
1648 
1649 	/*
1650 	 * Since we unattached above (if necessary), we know that we're
1651 	 * on the per-major list of unattached streams, rather than a
1652 	 * per-PPA list.  So we know we should hold the devlock.
1653 	 */
1654 	mutex_enter(&glddev->gld_devlock);
1655 	gldremque(gld);			/* remove from Style 2 list */
1656 	mutex_exit(&glddev->gld_devlock);
1657 
1658 	kmem_free(gld, sizeof (gld_t));
1659 
1660 	return (0);
1661 }
1662 
1663 /*
1664  * gld_rsrv (q)
1665  *	Simple read service procedure.  Its purpose is to avoid spending
1666  *	the time it takes for packets to move through IP in the receive
1667  *	path, so we can get them off the board as fast as possible on
1668  *	machines with limited resources.
1669  *
1670  *	This is not normally used in the current implementation.  It
1671  *	can be selected with the undocumented property "fast_recv".
1672  *	If that property is set, gld_recv will send the packet
1673  *	upstream with a putq() rather than a putnext(), thus causing
1674  *	this routine to be scheduled.
1675  */
1676 int
1677 gld_rsrv(queue_t *q)
1678 {
1679 	mblk_t *mp;
1680 
1681 	while ((mp = getq(q)) != NULL) {
1682 		if (canputnext(q)) {
1683 			putnext(q, mp);
1684 		} else {
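			/* upstream can't accept it; drop rather than requeue */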
1685 			freemsg(mp);
1686 		}
1687 	}
1688 	return (0);
1689 }
1690 
1691 /*
1692  * gld_wput (q, mp)
1693  * general gld stream write put routine. Receives fastpath data from upper
1694  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1695  * queued for later processing by the service procedure.
1696  */
1697 
1698 int
1699 gld_wput(queue_t *q, mblk_t *mp)
1700 {
1701 	gld_t  *gld = (gld_t *)(q->q_ptr);
1702 	int	rc;
1703 	boolean_t multidata = B_TRUE;
1704 	uint32_t upri;
1705 
1706 #ifdef GLD_DEBUG
1707 	if (gld_debug & GLDTRACE)
1708 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1709 		    (void *)q, (void *)mp, DB_TYPE(mp));
1710 #endif
1711 	switch (DB_TYPE(mp)) {
1712 
1713 	case M_DATA:
1714 		/* fast data / raw support */
1715 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1716 		/* Tricky: we access these fields without taking the mutex */
1717 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1718 		    gld->gld_state != DL_IDLE) {
1719 			merror(q, mp, EPROTO);
1720 			break;
1721 		}
1722 		/*
1723 		 * Clear MBLK_VTAG in case it was set by other
1724 		 * modules. MBLK_VTAG is used to save the vtag information.
1725 		 */
1726 		GLD_CLEAR_MBLK_VTAG(mp);
1727 		multidata = B_FALSE;
1728 		/* LINTED: E_CASE_FALLTHRU */
1729 	case M_MULTIDATA:
1730 		/* Only call gld_start() directly if nothing queued ahead */
1731 		/* No guarantees about ordering with different threads */
1732 		if (q->q_first)
1733 			goto use_wsrv;
1734 
1735 		/*
1736 		 * This can happen if wsrv has taken off the last mblk but
1737 		 * is still processing it.
1738 		 */
1739 		membar_consumer();
1740 		if (gld->gld_in_wsrv)
1741 			goto use_wsrv;
1742 
1743 		/*
1744 		 * Keep a count of current wput calls to start.
1745 		 * Nonzero count delays any attempted DL_UNBIND.
1746 		 * See comments above gld_start().
1747 		 */
1748 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1749 		membar_enter();
1750 
1751 		/* Recheck state now that wput_count is set, to prevent DL_UNBIND */
1752 		/* If this Q is in process of DL_UNBIND, don't call start */
1753 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1754 			/* Extremely unlikely */
1755 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1756 			goto use_wsrv;
1757 		}
1758 
1759 		/*
1760 		 * Get the priority value. Note that in raw mode, the
1761 		 * per-packet priority value kept in b_band is ignored.
1762 		 */
1763 		upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1764 		    UPRI(gld, mp->b_band);
1765 
1766 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1767 		    gld_start(q, mp, GLD_WPUT, upri);
1768 
1769 		/* Allow DL_UNBIND again */
1770 		membar_exit();
1771 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1772 
1773 		if (rc == GLD_NORESOURCES)
1774 			qenable(q);
1775 		break;	/*  Done with this packet */
1776 
1777 use_wsrv:
1778 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1779 		(void) putq(q, mp);
1780 		qenable(q);
1781 		break;
1782 
1783 	case M_IOCTL:
1784 		/* ioctl relies on wsrv single threading per queue */
1785 		(void) putq(q, mp);
1786 		qenable(q);
1787 		break;
1788 
1789 	case M_CTL:
1790 		(void) putq(q, mp);
1791 		qenable(q);
1792 		break;
1793 
1794 	case M_FLUSH:		/* canonical flush handling */
1795 		/* XXX Should these be FLUSHALL? */
1796 		if (*mp->b_rptr & FLUSHW)
1797 			flushq(q, 0);
1798 		if (*mp->b_rptr & FLUSHR) {
1799 			flushq(RD(q), 0);
1800 			*mp->b_rptr &= ~FLUSHW;
1801 			qreply(q, mp);
1802 		} else
1803 			freemsg(mp);
1804 		break;
1805 
1806 	case M_PROTO:
1807 	case M_PCPROTO:
1808 		/* these rely on wsrv single threading per queue */
1809 		(void) putq(q, mp);
1810 		qenable(q);
1811 		break;
1812 
1813 	default:
1814 #ifdef GLD_DEBUG
1815 		if (gld_debug & GLDETRACE)
1816 			cmn_err(CE_WARN,
1817 			    "gld: Unexpected packet type from queue: 0x%x",
1818 			    DB_TYPE(mp));
1819 #endif
1820 		freemsg(mp);
1821 	}
1822 	return (0);
1823 }
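
/*
 * Summary sketch of the gld_wput_count protocol used above (no new logic;
 * see also the comments above gld_start()).  The wput side does
 *
 *	atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
 *	membar_enter();
 *	recheck gld_state / gld_in_unbind, backing out if unbinding;
 *	rc = gld_start(q, mp, GLD_WPUT, upri);
 *	membar_exit();
 *	atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
 *
 * while a DL_UNBIND/DL_DETACH processed in wsrv that finds gld_wput_count
 * nonzero putbqs its request and qenables, retrying once the in-flight
 * wput=>start calls have drained.
 */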
1824 
1825 /*
1826  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1827  * specification.
1828  *
1829  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1830  * lock for reading data items that are only ever written by us.
1831  */
1832 
1833 int
1834 gld_wsrv(queue_t *q)
1835 {
1836 	mblk_t *mp;
1837 	gld_t *gld = (gld_t *)q->q_ptr;
1838 	gld_mac_info_t *macinfo;
1839 	union DL_primitives *prim;
1840 	int err;
1841 	boolean_t multidata;
1842 	uint32_t upri;
1843 
1844 #ifdef GLD_DEBUG
1845 	if (gld_debug & GLDTRACE)
1846 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1847 #endif
1848 
1849 	ASSERT(!gld->gld_in_wsrv);
1850 
1851 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1852 
1853 	if (q->q_first == NULL)
1854 		return (0);
1855 
1856 	macinfo = gld->gld_mac_info;
1857 
1858 	/*
1859 	 * Help wput avoid a call to gld_start while a message previously
1860 	 * queued by that thread is still being processed here.
1861 	 */
1862 	gld->gld_in_wsrv = B_TRUE;
1863 	membar_enter();
1864 
1865 	while ((mp = getq(q)) != NULL) {
1866 		switch (DB_TYPE(mp)) {
1867 		case M_DATA:
1868 		case M_MULTIDATA:
1869 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1870 
1871 			/*
1872 			 * This is either a retry of a previously processed
1873 			 * DL_UNITDATA_REQ, or a RAW or FAST message from above.
1874 			 */
1875 			if (macinfo == NULL) {
1876 				/* No longer attached to a PPA, drop packet */
1877 				freemsg(mp);
1878 				break;
1879 			}
1880 
1881 			gld->gld_sched_ran = B_FALSE;
1882 			membar_enter();
1883 
1884 			/*
1885 			 * Get the priority value. Note that in raw mode, the
1886 			 * per-packet priority value kept in b_band is ignored.
1887 			 */
1888 			upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1889 			    UPRI(gld, mp->b_band);
1890 
1891 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1892 			    gld_start(q, mp, GLD_WSRV, upri);
1893 			if (err == GLD_NORESOURCES) {
1894 				/* gld_sched will qenable us later */
1895 				gld->gld_xwait = B_TRUE; /* want qenable */
1896 				membar_enter();
1897 				/*
1898 				 * v2:  we're not holding the lock; it's
1899 				 * possible that the driver could have already
1900 				 * called gld_sched (following up on its
1901 				 * return of GLD_NORESOURCES), before we got a
1902 				 * chance to do the putbq() and set gld_xwait.
1903 				 * So if we've seen a call to gld_sched()
1904 				 * that examined this queue since our call to
1905 				 * gld_start() above, then it's possible we've
1906 				 * already seen the only call to gld_sched()
1907 				 * we're ever going to see.  So we'd better
1908 				 * retry transmitting this packet right now.
1909 				 */
1910 				if (gld->gld_sched_ran) {
1911 #ifdef GLD_DEBUG
1912 					if (gld_debug & GLDTRACE)
1913 						cmn_err(CE_NOTE, "gld_wsrv: "
1914 						    "sched was called");
1915 #endif
1916 					break;	/* try again right now */
1917 				}
1918 				gld->gld_in_wsrv = B_FALSE;
1919 				return (0);
1920 			}
1921 			break;
1922 
1923 		case M_IOCTL:
1924 			(void) gld_ioctl(q, mp);
1925 			break;
1926 
1927 		case M_CTL:
1928 			if (macinfo == NULL) {
1929 				freemsg(mp);
1930 				break;
1931 			}
1932 
1933 			if (macinfo->gldm_mctl != NULL) {
1934 				GLDM_LOCK(macinfo, RW_WRITER);
1935 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1936 				GLDM_UNLOCK(macinfo);
1937 			} else {
1938 				/* The driver doesn't handle M_CTL; just drop */
1939 				freemsg(mp);
1940 			}
1941 			break;
1942 
1943 		case M_PROTO:	/* Will be a DLPI message of some type */
1944 		case M_PCPROTO:
1945 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1946 				if (err == GLDE_RETRY) {
1947 					gld->gld_in_wsrv = B_FALSE;
1948 					return (0); /* quit while we're ahead */
1949 				}
1950 				prim = (union DL_primitives *)mp->b_rptr;
1951 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1952 			}
1953 			break;
1954 
1955 		default:
1956 			/* This should never happen */
1957 #ifdef GLD_DEBUG
1958 			if (gld_debug & GLDERRS)
1959 				cmn_err(CE_WARN,
1960 				    "gld_wsrv: db_type(%x) not supported",
1961 				    mp->b_datap->db_type);
1962 #endif
1963 			freemsg(mp);	/* unknown types are discarded */
1964 			break;
1965 		}
1966 	}
1967 
1968 	membar_exit();
1969 	gld->gld_in_wsrv = B_FALSE;
1970 	return (0);
1971 }
1972 
1973 /*
1974  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1975  *
1976  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1977  *
1978  * In particular, we must avoid calling gld_precv*() if we came from wput().
1979  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1980  * packets to the receive side if we are in physical promiscuous mode.
1981  * Since the receive side holds a lock across its call to the upstream
1982  * putnext, and that upstream module could well have looped back to our
1983  * wput() routine on the same thread, we cannot call gld_precv* from here
1984  * for fear of causing a recursive lock entry in our receive code.
1985  *
1986  * There is a problem here when coming from gld_wput().  While wput
1987  * only comes here if the queue is attached to a PPA and bound to a SAP
1988  * and there are no messages on the queue ahead of the M_DATA that could
1989  * change that, it is theoretically possible that another thread could
1990  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1991  * could wake up and process them, before we finish processing this
1992  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1993  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1994  * and Style 1 streams only DL_DETACH in the close routine, where
1995  * qprocsoff() protects us.  If this happens we could end up calling
1996  * gldm_send() after we have detached the stream and possibly called
1997  * gldm_stop().  Worse, once the number of attached streams goes to zero,
1998  * detach/unregister could be called, and the macinfo could go away entirely.
1999  *
2000  * No one has ever seen this happen.
2001  *
2002  * It is some trouble to fix this, and we would rather not add any mutex
2003  * logic into the wput() routine, which is supposed to be a "fast"
2004  * path.
2005  *
2006  * What I've done is use an atomic counter to keep a count of the number
2007  * of threads currently calling gld_start() from wput() on this stream.
2008  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
2009  * the queue and qenables, hoping to have better luck next time.  Since
2010  * people shouldn't be trying to send after they've asked to DL_DETACH,
2011  * hopefully very soon all the wput=>start threads should have returned
2012  * and the DL_DETACH will succeed.  It's hard to test this since the odds
2013  * of the failure even trying to happen are so small.  I probably could
2014  * have ignored the whole issue and never been the worse for it.
2015  *
2016  * Because some GLDv2 Ethernet drivers do not allow the size of a transmitted
2017  * packet to be greater than ETHERMAX, we must first strip the VLAN tag
2018  * from a tagged packet before passing it to the driver's gld_send() entry
2019  * point function, and pass the VLAN tag as a separate argument. The
2020  * gld_send() function may fail. In that case, the packet needs to be
2021  * queued so it can be processed again by GLD's service routine. As the
2022  * VTAG has already been stripped at that point, we save the VTAG information
2023  * in (the unused fields of) the dblk using GLD_SAVE_MBLK_VTAG(), so that the
2024  * VTAG is effectively queued with the packet and can be retrieved the next
2025  * time gld_start() is called from gld_wsrv().
2026  *
2027  * Some rules to use GLD_{CLEAR|SAVE}_MBLK_VTAG macros:
2028  *
2029  * - GLD_SAVE_MBLK_VTAG() must be called to save the VTAG information each time
2030  *   the message is queued by putbq().
2031  *
2032  * - GLD_CLEAR_MBLK_VTAG() must be called to clear the bogus VTAG information
2033  *   (if any) in dblk before the message is passed to the gld_start() function.
2034  */
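/*
 * Illustrative sketch of how the rules above play out (a summary of the
 * existing code paths below, not additional logic):
 *
 *	GLD_CLEAR_MBLK_VTAG(mp);		wput: clear stale tag info
 *	...
 *	raw_vtag = GLD_GET_MBLK_VTAG(mp);	gld_start: recover saved tag
 *	rc = (*gld->gld_send)(macinfo, mp, vtag);
 *	if (rc == GLD_NORESOURCES) {
 *		GLD_SAVE_MBLK_VTAG(mp, raw_vtag);	keep tag with queued mp
 *		(void) putbq(q, mp);
 *	}
 *
 * so the tag survives the requeue and is available on the retry from
 * gld_wsrv().
 */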
2035 static int
2036 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
2037 {
2038 	mblk_t *nmp;
2039 	gld_t *gld = (gld_t *)q->q_ptr;
2040 	gld_mac_info_t *macinfo;
2041 	gld_mac_pvt_t *mac_pvt;
2042 	int rc;
2043 	gld_interface_t *ifp;
2044 	pktinfo_t pktinfo;
2045 	uint32_t vtag, vid;
2046 	uint32_t raw_vtag = 0;
2047 	gld_vlan_t *vlan;
2048 	struct gld_stats *stats0, *stats = NULL;
2049 
2050 	ASSERT(DB_TYPE(mp) == M_DATA);
2051 	macinfo = gld->gld_mac_info;
2052 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2053 	ifp = mac_pvt->interfacep;
2054 	vlan = (gld_vlan_t *)gld->gld_vlan;
2055 	vid = vlan->gldv_id;
2056 
2057 	/*
2058 	 * If this interface is a VLAN, the kstats of the corresponding
2059 	 * "VLAN 0" should also be updated. Note that the gld_vlan_t
2060 	 * structure for VLAN 0 might not exist if there are no DLPI
2061 	 * consumers attaching on VLAN 0. Fortunately we can directly
2062 	 * access VLAN 0's kstats from macinfo.
2063 	 *
2064 	 * Therefore, stats0 (VLAN 0's kstats) must always be
2065 	 * updated, and stats must be updated if it is not NULL.
2066 	 */
2067 	stats0 = mac_pvt->statistics;
2068 	if (vid != VLAN_VID_NONE)
2069 		stats = vlan->gldv_stats;
2070 
2071 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2072 #ifdef GLD_DEBUG
2073 		if (gld_debug & GLDERRS)
2074 			cmn_err(CE_WARN,
2075 			    "gld_start: failed to interpret outbound packet");
2076 #endif
2077 		goto badarg;
2078 	}
2079 
2080 	vtag = VLAN_VID_NONE;
2081 	raw_vtag = GLD_GET_MBLK_VTAG(mp);
2082 	if (GLD_VTAG_TCI(raw_vtag) != 0) {
2083 		uint16_t raw_pri, raw_vid, evid;
2084 
2085 		/*
2086 		 * Tagged packet.
2087 		 */
2088 		raw_pri = GLD_VTAG_PRI(raw_vtag);
2089 		raw_vid = GLD_VTAG_VID(raw_vtag);
2090 		GLD_CLEAR_MBLK_VTAG(mp);
2091 
2092 		if (gld->gld_flags & GLD_RAW) {
2093 			/*
2094 			 * In raw mode, we only expect untagged packets or
2095 			 * special priority-tagged packets on a VLAN stream.
2096 			 * Drop the packet if its VID is not zero.
2097 			 */
2098 			if (vid != VLAN_VID_NONE && raw_vid != VLAN_VID_NONE)
2099 				goto badarg;
2100 
2101 			/*
2102 			 * If it is raw mode, use the per-stream priority if
2103 			 * the priority is not specified in the packet.
2104 			 * Otherwise, ignore the priority bits in the packet.
2105 			 */
2106 			upri = (raw_pri != 0) ? raw_pri : upri;
2107 		}
2108 
2109 		if (vid == VLAN_VID_NONE && vid != raw_vid) {
2110 			gld_vlan_t *tmp_vlan;
2111 
2112 			/*
2113 			 * This link is a physical link but the packet is
2114 			 * a VLAN-tagged packet; the kstats of the corresponding
2115 			 * VLAN (if any) should also be updated.
2116 			 */
2117 			tmp_vlan = gld_find_vlan(macinfo, raw_vid);
2118 			if (tmp_vlan != NULL)
2119 				stats = tmp_vlan->gldv_stats;
2120 		}
2121 
2122 		evid = (vid == VLAN_VID_NONE) ? raw_vid : vid;
2123 		if (evid != VLAN_VID_NONE || upri != 0)
2124 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, evid);
2125 	} else {
2126 		/*
2127 		 * Untagged packet:
2128 		 * Get vtag from the attached PPA of this stream.
2129 		 */
2130 		if ((vid != VLAN_VID_NONE) ||
2131 		    ((macinfo->gldm_type == DL_ETHER) && (upri != 0))) {
2132 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, vid);
2133 		}
2134 	}
2135 
2136 	/*
2137 	 * We're not holding the lock for this check.  If the promiscuous
2138 	 * state is in flux it doesn't matter much if we get this wrong.
2139 	 */
2140 	if (mac_pvt->nprom > 0) {
2141 		/*
2142 		 * We want to loopback to the receive side, but to avoid
2143 		 * recursive lock entry:  if we came from wput(), which
2144 		 * could have looped back via IP from our own receive
2145 		 * interrupt thread, we decline this request.  wput()
2146 		 * will then queue the packet for wsrv().  This means
2147 		 * that when snoop is running we don't get the advantage
2148 		 * of the wput() multithreaded direct entry to the
2149 		 * driver's send routine.
2150 		 */
2151 		if (caller == GLD_WPUT) {
2152 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2153 			(void) putbq(q, mp);
2154 			return (GLD_NORESOURCES);
2155 		}
2156 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2157 			nmp = dupmsg_noloan(mp);
2158 		else
2159 			nmp = dupmsg(mp);
2160 	} else
2161 		nmp = NULL;		/* we need no loopback */
2162 
2163 	if (ifp->hdr_size > 0 &&
2164 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2165 	    macinfo->gldm_maxpkt) {
2166 		if (nmp)
2167 			freemsg(nmp);	/* free the duped message */
2168 #ifdef GLD_DEBUG
2169 		if (gld_debug & GLDERRS)
2170 			cmn_err(CE_WARN,
2171 			    "gld_start: oversize outbound packet, size %d, "
2172 			    "max %d", pktinfo.pktLen,
2173 			    ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2174 			    macinfo->gldm_maxpkt);
2175 #endif
2176 		goto badarg;
2177 	}
2178 
2179 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2180 
2181 	if (rc != GLD_SUCCESS) {
2182 		if (rc == GLD_NORESOURCES) {
2183 			ATOMIC_BUMP(stats0, stats, glds_xmtretry, 1);
2184 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2185 			(void) putbq(q, mp);
2186 		} else {
2187 			/* transmit error; drop the packet */
2188 			freemsg(mp);
2189 			/* We're supposed to count failed attempts as well */
2190 			UPDATE_STATS(stats0, stats, pktinfo, 1);
2191 #ifdef GLD_DEBUG
2192 			if (gld_debug & GLDERRS)
2193 				cmn_err(CE_WARN,
2194 				    "gld_start: gldm_send failed %d", rc);
2195 #endif
2196 		}
2197 		if (nmp)
2198 			freemsg(nmp);	/* free the duped message */
2199 		return (rc);
2200 	}
2201 
2202 	UPDATE_STATS(stats0, stats, pktinfo, 1);
2203 
2204 	/*
2205 	 * Loopback case. The message needs to be sent back up
2206 	 * the read side. This silently fails if the dupmsg above
2207 	 * failed. That is probably OK: if there is no memory to dup the
2208 	 * block, then there isn't much we could do anyway.
2209 	 */
2210 	if (nmp) {
2211 		GLDM_LOCK(macinfo, RW_WRITER);
2212 		gld_precv(macinfo, nmp, vtag, stats);
2213 		GLDM_UNLOCK(macinfo);
2214 	}
2215 
2216 	return (GLD_SUCCESS);
2217 
2218 badarg:
2219 	freemsg(mp);
2220 
2221 	ATOMIC_BUMP(stats0, stats, glds_xmtbadinterp, 1);
2222 	return (GLD_BADARG);
2223 }
2224 
2225 /*
2226  * With MDT V.2 a single message mp can have one header area and multiple
2227  * payload areas. A packet is described by dl_pkt_info, and each packet can
2228  * span multiple payload areas (currently with TCP, each packet will have one
2229  * header and at most two payload areas). MACs might have a limit on the
2230  * number of payload segments (i.e. a per-packet scatter-gather limit), and
2231  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2232  * might also have a limit on the total number of payloads in a message, and
2233  * that is specified by mdt_max_pld.
2234  */
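/*
 * Worked example of the gldm_mdt_pre()/gldm_mdt_send()/gldm_mdt_post()
 * contract as used below (the numbers are illustrative): if mmd_getcnt()
 * reports 10 packets in the message and gldm_mdt_pre() returns 4, the first
 * 4 descriptors are formatted and handed to gldm_mdt_send(), gldm_mdt_post()
 * is called, those descriptors are removed, and the message is put back on
 * the queue so the remaining 6 packets are retried later from gld_wsrv().
 * A return of 0 means nothing can be sent now (requeue and retry the whole
 * message later), and -1 means the message is dropped after its statistics
 * are counted.
 */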
2235 static int
2236 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2237 {
2238 	mblk_t *nextmp;
2239 	gld_t *gld = (gld_t *)q->q_ptr;
2240 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2241 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2242 	int numpacks, mdtpacks;
2243 	gld_interface_t *ifp = mac_pvt->interfacep;
2244 	pktinfo_t pktinfo;
2245 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2246 	boolean_t doloop = B_FALSE;
2247 	multidata_t *dlmdp;
2248 	pdescinfo_t pinfo;
2249 	pdesc_t *dl_pkt;
2250 	void *cookie;
2251 	uint_t totLen = 0;
2252 
2253 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2254 
2255 	/*
2256 	 * We're not holding the lock for this check.  If the promiscuous
2257 	 * state is in flux it doesn't matter much if we get this wrong.
2258 	 */
2259 	if (mac_pvt->nprom > 0) {
2260 		/*
2261 		 * We want to loopback to the receive side, but to avoid
2262 		 * recursive lock entry:  if we came from wput(), which
2263 		 * could have looped back via IP from our own receive
2264 		 * interrupt thread, we decline this request.  wput()
2265 		 * will then queue the packet for wsrv().  This means
2266 		 * that when snoop is running we don't get the advantage
2267 		 * of the wput() multithreaded direct entry to the
2268 		 * driver's send routine.
2269 		 */
2270 		if (caller == GLD_WPUT) {
2271 			(void) putbq(q, mp);
2272 			return (GLD_NORESOURCES);
2273 		}
2274 		doloop = B_TRUE;
2275 
2276 		/*
2277 		 * unlike the M_DATA case, we don't have to call
2278 		 * dupmsg_noloan here because mmd_transform
2279 		 * (called by gld_precv_mdt) will make a copy of
2280 		 * each dblk.
2281 		 */
2282 	}
2283 
2284 	while (mp != NULL) {
2285 		/*
2286 		 * The lower layer driver only gets a single multidata
2287 		 * message; this also makes it easier to handle noresources.
2288 		 */
2289 		nextmp = mp->b_cont;
2290 		mp->b_cont = NULL;
2291 
2292 		/*
2293 		 * Get number of packets in this message; if nothing
2294 		 * to transmit, go to next message.
2295 		 */
2296 		dlmdp = mmd_getmultidata(mp);
2297 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2298 			freemsg(mp);
2299 			mp = nextmp;
2300 			continue;
2301 		}
2302 
2303 		/*
2304 		 * Run the interpreter to populate media-specific pktinfo
2305 		 * fields. This collects per-MDT-message information such as
2306 		 * the SAP and broadcast/multicast status.
2307 		 */
2308 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2309 		    GLD_MDT_TX);
2310 
2311 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2312 
2313 		if (numpacks > 0) {
2314 			/*
2315 			 * Driver indicates it can transmit at least 1, and
2316 			 * possibly all, packets in MDT message.
2317 			 */
2318 			int count = numpacks;
2319 
2320 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2321 			    (dl_pkt != NULL);
2322 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2323 				/*
2324 				 * Format this packet by adding link header and
2325 				 * adjusting pdescinfo to include it; get
2326 				 * packet length.
2327 				 */
2328 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2329 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2330 
2331 				totLen += pktinfo.pktLen;
2332 
2333 				/*
2334 				 * Loop back packet before handing to the
2335 				 * driver.
2336 				 */
2337 				if (doloop &&
2338 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2339 					GLDM_LOCK(macinfo, RW_WRITER);
2340 					gld_precv_mdt(macinfo, vlan, mp,
2341 					    dl_pkt, &pktinfo);
2342 					GLDM_UNLOCK(macinfo);
2343 				}
2344 
2345 				/*
2346 				 * And send off to driver.
2347 				 */
2348 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2349 				    &pinfo);
2350 
2351 				/*
2352 				 * Be careful not to invoke getnextpdesc if we
2353 				 * already sent the last packet: the driver
2354 				 * might have posted it to hardware, causing a
2355 				 * completion and freemsg(), so the MDT data
2356 				 * structures might no longer be valid.
2357 				 */
2358 				if (--count == 0)
2359 					break;
2360 			}
2361 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2362 			pktinfo.pktLen = totLen;
2363 			UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, numpacks);
2364 
2365 			/*
2366 			 * In the noresources case (when the driver indicates
2367 			 * it cannot transmit all packets in the MDT message),
2368 			 * adjust to skip the already-sent packets on retry.
2369 			 */
2370 			if (numpacks != mdtpacks) {
2371 				/*
2372 				 * Release already processed packet descriptors.
2373 				 */
2374 				for (count = 0; count < numpacks; count++) {
2375 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2376 					    &pinfo);
2377 					mmd_rempdesc(dl_pkt);
2378 				}
2379 				ATOMIC_BUMP(vlan->gldv_stats, NULL,
2380 				    glds_xmtretry, 1);
2381 				mp->b_cont = nextmp;
2382 				(void) putbq(q, mp);
2383 				return (GLD_NORESOURCES);
2384 			}
2385 		} else if (numpacks == 0) {
2386 			/*
2387 			 * The driver indicates it cannot transmit any packets
2388 			 * right now and will request a retry later.
2389 			 */
2390 			ATOMIC_BUMP(vlan->gldv_stats, NULL, glds_xmtretry, 1);
2391 			mp->b_cont = nextmp;
2392 			(void) putbq(q, mp);
2393 			return (GLD_NORESOURCES);
2394 		} else {
2395 			ASSERT(numpacks == -1);
2396 			/*
2397 			 * We're supposed to count failed attempts as well.
2398 			 */
2399 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2400 			while (dl_pkt != NULL) {
2401 				/*
2402 				 * Call interpreter to determine total packet
2403 				 * bytes that are being dropped.
2404 				 */
2405 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2406 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2407 
2408 				totLen += pktinfo.pktLen;
2409 
2410 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2411 			}
2412 			pktinfo.pktLen = totLen;
2413 			UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, mdtpacks);
2414 
2415 			/*
2416 			 * Transmit error; drop the message, move on
2417 			 * to the next one.
2418 			 */
2419 			freemsg(mp);
2420 		}
2421 
2422 		/*
2423 		 * Process the next multidata block, if there is one.
2424 		 */
2425 		mp = nextmp;
2426 	}
2427 
2428 	return (GLD_SUCCESS);
2429 }
2430 
2431 /*
2432  * gld_intr (macinfo)
2433  */
2434 uint_t
2435 gld_intr(gld_mac_info_t *macinfo)
2436 {
2437 	ASSERT(macinfo != NULL);
2438 
2439 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2440 		return (DDI_INTR_UNCLAIMED);
2441 
2442 	return ((*macinfo->gldm_intr)(macinfo));
2443 }
2444 
2445 /*
2446  * gld_sched (macinfo)
2447  *
2448  * This routine scans the streams that refer to a specific macinfo
2449  * structure and causes the STREAMS scheduler to try to run them if
2450  * they are marked as waiting for the transmit buffer.
2451  */
2452 void
2453 gld_sched(gld_mac_info_t *macinfo)
2454 {
2455 	gld_mac_pvt_t *mac_pvt;
2456 	gld_t *gld;
2457 	gld_vlan_t *vlan;
2458 	int i;
2459 
2460 	ASSERT(macinfo != NULL);
2461 
2462 	GLDM_LOCK(macinfo, RW_WRITER);
2463 
2464 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2465 		/* We're probably being called from a leftover interrupt */
2466 		GLDM_UNLOCK(macinfo);
2467 		return;
2468 	}
2469 
2470 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2471 
2472 	for (i = 0; i < VLAN_HASHSZ; i++) {
2473 		for (vlan = mac_pvt->vlan_hash[i];
2474 		    vlan != NULL; vlan = vlan->gldv_next) {
2475 			for (gld = vlan->gldv_str_next;
2476 			    gld != (gld_t *)&vlan->gldv_str_next;
2477 			    gld = gld->gld_next) {
2478 				ASSERT(gld->gld_mac_info == macinfo);
2479 				gld->gld_sched_ran = B_TRUE;
2480 				membar_enter();
2481 				if (gld->gld_xwait) {
2482 					gld->gld_xwait = B_FALSE;
2483 					qenable(WR(gld->gld_qptr));
2484 				}
2485 			}
2486 		}
2487 	}
2488 
2489 	GLDM_UNLOCK(macinfo);
2490 }
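
/*
 * Typical driver usage (an illustrative sketch with hypothetical names, not
 * code from any particular driver): after gldm_send() has returned
 * GLD_NORESOURCES, the driver calls gld_sched() once transmit resources are
 * available again, e.g. from its transmit-reclaim path:
 *
 *	if (xx_reclaim_tx_descriptors(xxp) > 0)
 *		gld_sched(xxp->xx_macinfo);
 *
 * which qenables any streams that blocked with gld_xwait set.
 */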
2491 
2492 /*
2493  * gld_precv (macinfo, mp, vtag, stats)
2494  * called from gld_start to loopback a packet when in promiscuous mode
2495  *
2496  * VLAN 0's statistics need to be updated. If stats is not NULL,
2497  * it needs to be updated as well.
2498  */
2499 static void
2500 gld_precv(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag,
2501     struct gld_stats *stats)
2502 {
2503 	gld_mac_pvt_t *mac_pvt;
2504 	gld_interface_t *ifp;
2505 	pktinfo_t pktinfo;
2506 
2507 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2508 
2509 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2510 	ifp = mac_pvt->interfacep;
2511 
2512 	/*
2513 	 * call the media specific packet interpreter routine
2514 	 */
2515 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2516 		freemsg(mp);
2517 		BUMP(mac_pvt->statistics, stats, glds_rcvbadinterp, 1);
2518 #ifdef GLD_DEBUG
2519 		if (gld_debug & GLDERRS)
2520 			cmn_err(CE_WARN,
2521 			    "gld_precv: interpreter failed");
2522 #endif
2523 		return;
2524 	}
2525 
2526 	/*
2527 	 * Update the vtag information.
2528 	 */
2529 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2530 	pktinfo.vid = GLD_VTAG_VID(vtag);
2531 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2532 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2533 
2534 	gld_sendup(macinfo, &pktinfo, mp, gld_paccept);
2535 }
2536 
2537 /*
2538  * Called from gld_start_mdt to loopback packet(s) when in promiscuous mode.
2539  * Note that 'vlan' is always a physical link, because MDT can only be
2540  * enabled on non-VLAN streams.
2541  */
2542 /*ARGSUSED*/
2543 static void
2544 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2545     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2546 {
2547 	mblk_t *adjmp;
2548 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2549 	gld_interface_t *ifp = mac_pvt->interfacep;
2550 
2551 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2552 
2553 	/*
2554 	 * Get source/destination.
2555 	 */
2556 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2557 	    GLD_MDT_RXLOOP);
2558 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2559 		gld_sendup(macinfo, pktinfo, adjmp, gld_paccept);
2560 }
2561 
2562 /*
2563  * gld_recv (macinfo, mp)
2564  * called with a MAC-level packet in an mblk; take the maclock,
2565  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2566  *
2567  * V0 drivers are already holding the mutex when they call us.
2568  */
2569 void
2570 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2571 {
2572 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2573 }
2574 
2575 void
2576 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2577 {
2578 	gld_mac_pvt_t *mac_pvt;
2579 	char pbuf[3*GLD_MAX_ADDRLEN];
2580 	pktinfo_t pktinfo;
2581 	gld_interface_t *ifp;
2582 	queue_t *ipq = NULL;
2583 	gld_vlan_t *vlan = NULL, *vlan0 = NULL, *vlann = NULL;
2584 	struct gld_stats *stats0, *stats = NULL;
2585 	uint32_t vid;
2586 	int err;
2587 
2588 	ASSERT(macinfo != NULL);
2589 	ASSERT(mp->b_datap->db_ref);
2590 
2591 	GLDM_LOCK(macinfo, RW_READER);
2592 
2593 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2594 		/* We're probably being called from a leftover interrupt */
2595 		freemsg(mp);
2596 		goto done;
2597 	}
2598 
2599 	/*
2600 	 * If this packet is a VLAN-tagged packet, the kstats of the corresponding
2601 	 * "VLAN 0" should also be updated. We can directly access VLAN 0's
2602 	 * kstats from macinfo.
2603 	 *
2604 	 * Further, the packet needs to be passed to VLAN 0 if any DLPI
2605 	 * consumer on VLAN 0 is interested in tagged packets (DL_PROMISC_SAP
2606 	 * is on, or the stream is bound to the ETHERTYPE_VLAN SAP).
2607 	 */
2608 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2609 	stats0 = mac_pvt->statistics;
2610 
2611 	vid = GLD_VTAG_VID(vtag);
2612 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2613 	if (vid != VLAN_VID_NONE) {
2614 		/*
2615 		 * If there are no physical DLPI consumers interested in the
2616 		 * VLAN packet, clear vlan0.
2617 		 */
2618 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2619 			vlan0 = NULL;
2620 		/*
2621 		 * vlann is the VLAN with the same VID as the VLAN packet.
2622 		 */
2623 		vlann = gld_find_vlan(macinfo, vid);
2624 		if (vlann != NULL)
2625 			stats = vlann->gldv_stats;
2626 	}
2627 
2628 	vlan = (vid == VLAN_VID_NONE) ? vlan0 : vlann;
2629 
2630 	ifp = mac_pvt->interfacep;
2631 	err = (*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXQUICK);
2632 
2633 	BUMP(stats0, stats, glds_bytercv64, pktinfo.pktLen);
2634 	BUMP(stats0, stats, glds_pktrcv64, 1);
2635 
2636 	if ((vlann == NULL) && (vlan0 == NULL)) {
2637 		freemsg(mp);
2638 		goto done;
2639 	}
2640 
2641 	/*
2642 	 * Check whether underlying media code supports the IPQ hack:
2643 	 *
2644 	 * - the interpreter could quickly parse the packet
2645 	 * - the device type supports IPQ (ethernet and IPoIB)
2646 	 * - there is one, and only one, IP stream bound (to this VLAN)
2647 	 * - that stream is a "fastpath" stream
2648 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2649 	 * - there are no streams in promiscuous mode (on this VLAN)
2650 	 * - if this packet is tagged, there is no need to send this
2651 	 *   packet to physical streams
2652 	 */
2653 	if ((err != 0) && ((vlan != NULL) && (vlan->gldv_nprom == 0)) &&
2654 	    (vlan == vlan0 || vlan0 == NULL)) {
2655 		switch (pktinfo.ethertype) {
2656 		case ETHERTYPE_IP:
2657 			ipq = vlan->gldv_ipq;
2658 			break;
2659 		case ETHERTYPE_IPV6:
2660 			ipq = vlan->gldv_ipv6q;
2661 			break;
2662 		}
2663 	}
2664 
2665 	/*
2666 	 * Special case for IP; we can simply do the putnext here, if:
2667 	 * o The IPQ hack is possible (ipq != NULL).
2668 	 * o the packet is specifically for me, and therefore:
2669 	 * - the packet is not multicast or broadcast (fastpath only
2670 	 *   wants unicast packets).
2671 	 *
2672 	 * o the stream is not asserting flow control.
2673 	 */
2674 	if (ipq != NULL &&
2675 	    pktinfo.isForMe &&
2676 	    canputnext(ipq)) {
2677 		/*
2678 		 * Skip the mac header. We know there is no LLC1/SNAP header
2679 		 * in this packet
2680 		 */
2681 		mp->b_rptr += pktinfo.macLen;
2682 		putnext(ipq, mp);
2683 		goto done;
2684 	}
2685 
2686 	/*
2687 	 * call the media specific packet interpreter routine
2688 	 */
2689 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2690 		BUMP(stats0, stats, glds_rcvbadinterp, 1);
2691 #ifdef GLD_DEBUG
2692 		if (gld_debug & GLDERRS)
2693 			cmn_err(CE_WARN,
2694 			    "gld_recv_tagged: interpreter failed");
2695 #endif
2696 		freemsg(mp);
2697 		goto done;
2698 	}
2699 
2700 	/*
2701 	 * This is safe even if vtag is VLAN_VTAG_NONE
2702 	 */
2703 	pktinfo.vid = vid;
2704 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2705 #ifdef GLD_DEBUG
2706 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2707 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2708 #endif
2709 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2710 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2711 
2712 #ifdef GLD_DEBUG
2713 	if ((gld_debug & GLDRECV) &&
2714 	    (!(gld_debug & GLDNOBR) ||
2715 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2716 		char pbuf2[3*GLD_MAX_ADDRLEN];
2717 
2718 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2719 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2720 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2721 		    pktinfo.dhost, macinfo->gldm_addrlen));
2722 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2723 		    pktinfo.vid,
2724 		    pktinfo.user_pri);
2725 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2726 		    "Hdr: %d,%d isMulticast: %s\n",
2727 		    pktinfo.ethertype,
2728 		    pktinfo.pktLen,
2729 		    pktinfo.macLen,
2730 		    pktinfo.hdrLen,
2731 		    pktinfo.isMulticast ? "Y" : "N");
2732 	}
2733 #endif
2734 
2735 	gld_sendup(macinfo, &pktinfo, mp, gld_accept);
2736 
2737 done:
2738 	GLDM_UNLOCK(macinfo);
2739 }
2740 
2741 /* =================================================================== */
2742 /* receive group: called from gld_recv and gld_precv* with maclock held */
2743 /* =================================================================== */
2744 
2745 /*
2746  * Search all the streams attached to the specified VLAN looking for
2747  * those eligible to receive the packet.
2748  * Note that in order to avoid an extra dupmsg(), the first eligible
2749  * stream is remembered (in fgldp) so that the caller can send the
2750  * original message up that stream after this function returns.
2751  *
2752  * Returns an errno on failure. Currently the only error is ENOMEM.
2753  */
2754 static int
2755 gld_sendup_vlan(gld_vlan_t *vlan, pktinfo_t *pktinfo, mblk_t *mp,
2756     int (*acceptfunc)(), void (*send)(), int (*cansend)(), gld_t **fgldp)
2757 {
2758 	mblk_t *nmp;
2759 	gld_t *gld;
2760 	int err = 0;
2761 
2762 	ASSERT(vlan != NULL);
2763 	for (gld = vlan->gldv_str_next; gld != (gld_t *)&vlan->gldv_str_next;
2764 	    gld = gld->gld_next) {
2765 #ifdef GLD_VERBOSE_DEBUG
2766 		cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s",
2767 		    gld->gld_sap, (void *)gld->gld_qptr,
2768 		    gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE");
2769 #endif
2770 		ASSERT(gld->gld_qptr != NULL);
2771 		ASSERT(gld->gld_state == DL_IDLE ||
2772 		    gld->gld_state == DL_UNBOUND);
2773 		ASSERT(gld->gld_vlan == vlan);
2774 
2775 		if (gld->gld_state != DL_IDLE)
2776 			continue;	/* not eligible to receive */
2777 		if (gld->gld_flags & GLD_STR_CLOSING)
2778 			continue;	/* not eligible to receive */
2779 
2780 #ifdef GLD_DEBUG
2781 		if ((gld_debug & GLDRECV) &&
2782 		    (!(gld_debug & GLDNOBR) ||
2783 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2784 			cmn_err(CE_NOTE,
2785 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2786 			    gld->gld_sap,
2787 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2788 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2789 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2790 #endif
2791 
2792 		/*
2793 		 * The accept function differs depending on whether this is
2794 		 * a packet that we received from the wire or a loopback.
2795 		 */
2796 		if ((*acceptfunc)(gld, pktinfo)) {
2797 			/* sap matches */
2798 			pktinfo->wasAccepted = 1; /* known protocol */
2799 
2800 			if (!(*cansend)(gld->gld_qptr)) {
2801 				/*
2802 				 * The upper stream is not accepting messages,
2803 				 * i.e. it is flow-controlled, so we forgo
2804 				 * sending the message up this stream.
2805 				 */
2806 #ifdef GLD_DEBUG
2807 				if (gld_debug & GLDETRACE)
2808 					cmn_err(CE_WARN,
2809 					    "gld_sendup: canput failed");
2810 #endif
2811 				BUMP(vlan->gldv_stats, NULL, glds_blocked, 1);
2812 				qenable(gld->gld_qptr);
2813 				continue;
2814 			}
2815 
2816 			/*
2817 			 * In order to avoid an extra dupmsg(), remember this
2818 			 * gld if this is the first eligible stream.
2819 			 */
2820 			if (*fgldp == NULL) {
2821 				*fgldp = gld;
2822 				continue;
2823 			}
2824 
2825 			/* duplicate the packet for this stream */
2826 			nmp = dupmsg(mp);
2827 			if (nmp == NULL) {
2828 				BUMP(vlan->gldv_stats, NULL,
2829 				    glds_gldnorcvbuf, 1);
2830 #ifdef GLD_DEBUG
2831 				if (gld_debug & GLDERRS)
2832 					cmn_err(CE_WARN,
2833 					    "gld_sendup: dupmsg failed");
2834 #endif
2835 				/* couldn't get resources; drop it */
2836 				err = ENOMEM;
2837 				break;
2838 			}
2839 			/* pass the message up the stream */
2840 			gld_passon(gld, nmp, pktinfo, send);
2841 		}
2842 	}
2843 	return (err);
2844 }
2845 
2846 /*
2847  * gld_sendup (macinfo, pktinfo, mp, acceptfunc)
2848  * called with an ethernet packet in an mblk; must decide whether
2849  * packet is for us and which streams to queue it to.
2850  */
2851 static void
2852 gld_sendup(gld_mac_info_t *macinfo, pktinfo_t *pktinfo,
2853     mblk_t *mp, int (*acceptfunc)())
2854 {
2855 	gld_t *fgld = NULL;
2856 	void (*send)(queue_t *qp, mblk_t *mp);
2857 	int (*cansend)(queue_t *qp);
2858 	gld_vlan_t *vlan0, *vlann = NULL;
2859 	struct gld_stats *stats0, *stats = NULL;
2860 	int err = 0;
2861 
2862 #ifdef GLD_DEBUG
2863 	if (gld_debug & GLDTRACE)
2864 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2865 		    (void *)macinfo);
2866 #endif
2867 
2868 	ASSERT(mp != NULL);
2869 	ASSERT(macinfo != NULL);
2870 	ASSERT(pktinfo != NULL);
2871 	ASSERT(GLDM_LOCK_HELD(macinfo));
2872 
2873 	/*
2874 	 * Tagged packets should also be looped back (transmit-side)
2875 	 * or sent up (receive-side) to VLAN 0 if VLAN 0 is set to
2876 	 * DL_PROMISC_SAP or there is any DLPI consumer bound to the
2877 	 * ETHERTYPE_VLAN SAP. The kstats of VLAN 0 need to be updated
2878 	 * as well.
2879 	 */
2880 	stats0 = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->statistics;
2881 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2882 	if (pktinfo->vid != VLAN_VID_NONE) {
2883 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2884 			vlan0 = NULL;
2885 		vlann = gld_find_vlan(macinfo, pktinfo->vid);
2886 		if (vlann != NULL)
2887 			stats = vlann->gldv_stats;
2888 	}
2889 
2890 	ASSERT((vlan0 != NULL) || (vlann != NULL));
2891 
2892 	/*
2893 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2894 	 * gld_recv returns to the caller's interrupt routine.  The total
2895 	 * network throughput would normally be lower when selecting this
2896 	 * option, because we putq the messages and process them later,
2897  * instead of sending them with putnext now.  Some time-critical
2898 	 * device might need this, so it's here but undocumented.
2899 	 */
2900 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2901 		send = (void (*)(queue_t *, mblk_t *))putq;
2902 		cansend = canput;
2903 	} else {
2904 		send = (void (*)(queue_t *, mblk_t *))putnext;
2905 		cansend = canputnext;
2906 	}
2907 
2908 	/*
2909 	 * Send the packets for all eligible streams.
2910 	 */
2911 	if (vlan0 != NULL) {
2912 		err = gld_sendup_vlan(vlan0, pktinfo, mp, acceptfunc, send,
2913 		    cansend, &fgld);
2914 	}
2915 	if ((err == 0) && (vlann != NULL)) {
2916 		err = gld_sendup_vlan(vlann, pktinfo, mp, acceptfunc, send,
2917 		    cansend, &fgld);
2918 	}
2919 
2920 	ASSERT(mp);
2921 	/* send the original dup of the packet up the first stream found */
2922 	if (fgld)
2923 		gld_passon(fgld, mp, pktinfo, send);
2924 	else
2925 		freemsg(mp);	/* no streams matched */
2926 
2927 	/* We do not count looped back packets */
2928 	if (acceptfunc == gld_paccept)
2929 		return;		/* transmit loopback case */
2930 
2931 	if (pktinfo->isBroadcast)
2932 		BUMP(stats0, stats, glds_brdcstrcv, 1);
2933 	else if (pktinfo->isMulticast)
2934 		BUMP(stats0, stats, glds_multircv, 1);
2935 
2936 	/* No stream accepted this packet */
2937 	if (!pktinfo->wasAccepted)
2938 		BUMP(stats0, stats, glds_unknowns, 1);
2939 }
2940 
2941 #define	GLD_IS_PHYS(gld)	\
2942 	(((gld_vlan_t *)gld->gld_vlan)->gldv_id == VLAN_VID_NONE)
2943 
2944 /*
2945  * A packet matches a stream if:
2946  *      The stream's VLAN id is the same as the one in the packet.
2947  *  and the stream accepts EtherType encoded packets and the type matches
2948  *  or  the stream accepts LLC packets and the packet is an LLC packet
2949  */
2950 #define	MATCH(stream, pktinfo) \
2951 	((((gld_vlan_t *)stream->gld_vlan)->gldv_id == pktinfo->vid) && \
2952 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2953 	(!stream->gld_ethertype && pktinfo->isLLC)))
2954 
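/*
 * For example (illustrative): a stream on the physical link
 * (gldv_id == VLAN_VID_NONE) bound with gld_ethertype set and
 * gld_sap == ETHERTYPE_IP matches an untagged IPv4 packet
 * (pktinfo->vid == VLAN_VID_NONE, pktinfo->ethertype == ETHERTYPE_IP),
 * while a stream bound to an LLC SAP (gld_ethertype clear) matches only
 * packets with isLLC set.
 */
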
2955 /*
2956  * This function validates a packet for sending up a particular
2957  * stream. The message header has been parsed and its characteristics
2958  * are recorded in the pktinfo data structure. The STREAMS stack
2959  * information is presented in the gld data structures.
2960  */
2961 static int
2962 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2963 {
2964 	/*
2965 	 * If there is no match, do not bother checking further.
2966 	 * Note that it is okay to examine gld_vlan because
2967 	 * macinfo->gldm_lock is held.
2968 	 *
2969 	 * Because all tagged packets have SAP value ETHERTYPE_VLAN,
2970 	 * these packets will pass the SAP filter check if the stream
2971 	 * is an ETHERTYPE_VLAN listener.
2972 	 */
2973 	if ((!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP) &&
2974 	    !(GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
2975 	    pktinfo->isTagged)))
2976 		return (0);
2977 
2978 	/*
2979 	 * We don't accept any packet from the hardware if we originated it.
2980 	 * (Contrast gld_paccept, the send-loopback accept function.)
2981 	 */
2982 	if (pktinfo->isLooped)
2983 		return (0);
2984 
2985 	/*
2986 	 * If the packet is broadcast or sent to us directly we will accept it.
2987 	 * Also we will accept multicast packets requested by the stream.
2988 	 */
2989 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2990 	    gld_mcmatch(gld, pktinfo))
2991 		return (1);
2992 
2993 	/*
2994 	 * Finally, accept anything else if we're in promiscuous mode
2995 	 */
2996 	if (gld->gld_flags & GLD_PROM_PHYS)
2997 		return (1);
2998 
2999 	return (0);
3000 }
3001 
3002 /*
3003  * Return TRUE if the given multicast address is one
3004  * of those that this particular Stream is interested in.
3005  */
3006 static int
3007 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
3008 {
3009 	/*
3010 	 * Return FALSE if not a multicast address.
3011 	 */
3012 	if (!pktinfo->isMulticast)
3013 		return (0);
3014 
3015 	/*
3016 	 * Check if all multicasts have been enabled for this Stream
3017 	 */
3018 	if (gld->gld_flags & GLD_PROM_MULT)
3019 		return (1);
3020 
3021 	/*
3022 	 * Return FALSE if no multicast addresses enabled for this Stream.
3023 	 */
3024 	if (!gld->gld_mcast)
3025 		return (0);
3026 
3027 	/*
3028 	 * Otherwise, look for it in the table.
3029 	 */
3030 	return (gld_multicast(pktinfo->dhost, gld));
3031 }
3032 
3033 /*
3034  * gld_multicast determines if the address is a multicast address for
3035  * this stream.
3036  */
3037 static int
3038 gld_multicast(unsigned char *macaddr, gld_t *gld)
3039 {
3040 	int i;
3041 
3042 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
3043 
3044 	if (!gld->gld_mcast)
3045 		return (0);
3046 
3047 	for (i = 0; i < gld->gld_multicnt; i++) {
3048 		if (gld->gld_mcast[i]) {
3049 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
3050 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
3051 			    gld->gld_mac_info->gldm_addrlen))
3052 				return (1);
3053 		}
3054 	}
3055 
3056 	return (0);
3057 }
3058 
3059 /*
3060  * accept function for looped back packets
3061  */
3062 static int
3063 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
3064 {
3065 	/*
3066 	 * Note that it is okay to examine gld_vlan because macinfo->gldm_lock
3067 	 * is held.
3068 	 *
3069  * If a stream is an ETHERTYPE_VLAN listener, it must
3070 	 * accept all tagged packets as those packets have SAP value
3071 	 * ETHERTYPE_VLAN.
3072 	 */
3073 	return (gld->gld_flags & GLD_PROM_PHYS &&
3074 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP ||
3075 	    (GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
3076 	    pktinfo->isTagged)));
3077 
3078 }
3079 
3080 static void
3081 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
3082 	void (*send)(queue_t *qp, mblk_t *mp))
3083 {
3084 	boolean_t is_phys = GLD_IS_PHYS(gld);
3085 	int skiplen;
3086 	boolean_t addtag = B_FALSE;
3087 	uint32_t vtag = 0;
3088 
3089 #ifdef GLD_DEBUG
3090 	if (gld_debug & GLDTRACE)
3091 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
3092 		    (void *)mp, (void *)pktinfo);
3093 
3094 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
3095 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
3096 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
3097 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
3098 		    gld->gld_sap);
3099 #endif
3100 	/*
3101 	 * Figure out how much of the packet header to throw away.
3102 	 *
3103 	 * Normal DLPI (non RAW/FAST) streams also want the
3104 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
3105 	 */
3106 	if (gld->gld_flags & GLD_RAW) {
3107 		/*
3108 		 * The packet will be tagged in the following cases:
3109 		 *   - the priority is not 0
3110 		 *   - it is a tagged packet and this is a physical stream
3111 		 */
3112 		if ((pktinfo->isTagged && is_phys) || (pktinfo->user_pri != 0))
3113 			addtag = B_TRUE;
3114 		skiplen = 0;
3115 	} else {
3116 		/*
3117 		 * The packet will be tagged if it meets all of these conditions:
3118 		 *   -  this is a physical stream
3119 		 *   -  the packet is a tagged packet
3120 		 *   -  the stream is either a DL_PROMISC_SAP listener or an
3121 		 *	ETHERTYPE_VLAN listener
3122 		 */
3123 		if (is_phys && pktinfo->isTagged &&
3124 		    ((gld->gld_sap == ETHERTYPE_VLAN) ||
3125 		    (gld->gld_flags & GLD_PROM_SAP))) {
3126 			addtag = B_TRUE;
3127 		}
3128 
3129 		skiplen = pktinfo->macLen;		/* skip mac header */
3130 		if (gld->gld_ethertype)
3131 			skiplen += pktinfo->hdrLen;	/* skip any extra */
3132 	}
3133 	if (skiplen >= pktinfo->pktLen) {
3134 		/*
3135 		 * If the interpreter did its job right, then it cannot be
3136 		 * asking us to skip more bytes than are in the packet!
3137 		 * However, there could be zero data bytes left after the
3138 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
3139 		 * should contain at least one byte of data, so if we have
3140 		 * none we just drop it.
3141 		 */
3142 		ASSERT(!(skiplen > pktinfo->pktLen));
3143 		freemsg(mp);
3144 		return;
3145 	}
3146 
3147 	if (addtag) {
3148 		mblk_t *savemp = mp;
3149 
3150 		vtag = GLD_MAKE_VTAG(pktinfo->user_pri, pktinfo->cfi,
3151 		    is_phys ? pktinfo->vid : VLAN_VID_NONE);
3152 		if ((mp = gld_insert_vtag_ether(mp, vtag)) == NULL) {
3153 			freemsg(savemp);
3154 			return;
3155 		}
3156 	}
3157 
3158 	/*
3159 	 * Skip over the header(s), taking care to possibly handle message
3160 	 * fragments shorter than the amount we need to skip.  Hopefully
3161 	 * the driver will put the entire packet, or at least the entire
3162 	 * header, into a single message block.  But we handle it if not.
3163 	 */
3164 	while (skiplen >= MBLKL(mp)) {
3165 		mblk_t *savemp = mp;
3166 		skiplen -= MBLKL(mp);
3167 		mp = mp->b_cont;
3168 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
3169 		freeb(savemp);
3170 	}
3171 	mp->b_rptr += skiplen;
3172 
3173 	/* Add M_PROTO if necessary, and pass upstream */
3174 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
3175 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
3176 		/* RAW/FAST: just send up the M_DATA */
3177 		(*send)(gld->gld_qptr, mp);
3178 	} else {
3179 		/* everybody else wants to see a unitdata_ind structure */
3180 		mp = gld_addudind(gld, mp, pktinfo, addtag);
3181 		if (mp)
3182 			(*send)(gld->gld_qptr, mp);
3183 		/* if it failed, gld_addudind already bumped statistic */
3184 	}
3185 }
3186 
3187 /*
3188  * gld_addudind(gld, mp, pktinfo)
3189  * format a DL_UNITDATA_IND message to be sent upstream to the user
3190  */
3191 static mblk_t *
3192 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, boolean_t tagged)
3193 {
3194 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
3195 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
3196 	dl_unitdata_ind_t	*dludindp;
3197 	mblk_t			*nmp;
3198 	int			size;
3199 	int			type;
3200 
3201 #ifdef GLD_DEBUG
3202 	if (gld_debug & GLDTRACE)
3203 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
3204 		    (void *)mp, (void *)pktinfo);
3205 #endif
3206 	ASSERT(macinfo != NULL);
3207 
3208 	/*
3209 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails,
3210 	 * we might as well discard the packet since we can't go further.
3211 	 */
3212 	size = sizeof (dl_unitdata_ind_t) +
3213 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
3214 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
3215 		freemsg(mp);
3216 		BUMP(vlan->gldv_stats, NULL, glds_gldnorcvbuf, 1);
3217 #ifdef GLD_DEBUG
3218 		if (gld_debug & GLDERRS)
3219 			cmn_err(CE_WARN,
3220 			    "gld_addudind: allocb failed");
3221 #endif
3222 		return ((mblk_t *)NULL);
3223 	}
3224 	DB_TYPE(nmp) = M_PROTO;
3225 	nmp->b_rptr = nmp->b_datap->db_lim - size;
3226 
3227 	if (tagged)
3228 		type = ETHERTYPE_VLAN;
3229 	else
3230 		type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
3231 
3232 
3233 	/*
3234 	 * now setup the DL_UNITDATA_IND header
3235 	 *
3236 	 * XXX This looks broken if the saps aren't two bytes.
3237 	 */
3238 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
3239 	dludindp->dl_primitive = DL_UNITDATA_IND;
3240 	dludindp->dl_src_addr_length =
3241 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
3242 					abs(macinfo->gldm_saplen);
3243 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
3244 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
3245 					dludindp->dl_dest_addr_length;
3246 
3247 	dludindp->dl_group_address = (pktinfo->isMulticast ||
3248 					pktinfo->isBroadcast);
3249 
3250 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
3251 
3252 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
3253 	nmp->b_wptr += macinfo->gldm_addrlen;
3254 
3255 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
3256 	*(ushort_t *)(nmp->b_wptr) = type;
3257 	nmp->b_wptr += abs(macinfo->gldm_saplen);
3258 
3259 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
3260 
3261 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
3262 	nmp->b_wptr += macinfo->gldm_addrlen;
3263 
3264 	*(ushort_t *)(nmp->b_wptr) = type;
3265 	nmp->b_wptr += abs(macinfo->gldm_saplen);
3266 
3267 	if (pktinfo->nosource)
3268 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
3269 	linkb(nmp, mp);
3270 	return (nmp);
3271 }
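
/*
 * Resulting M_PROTO layout (illustrative, for an Ethernet-style MAC with
 * gldm_addrlen == 6 and gldm_saplen == -2, as the ASSERT above expects):
 *
 *	dl_unitdata_ind_t	primitive, offsets and lengths filled in
 *	dest MAC address	6 bytes
 *	dest SAP/type		2 bytes
 *	src MAC address		6 bytes
 *	src SAP/type		2 bytes
 *
 * with the original M_DATA block(s) linked on afterwards via linkb().
 */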
3272 
3273 /* ======================================================= */
3274 /* wsrv group: called from wsrv, single threaded per queue */
3275 /* ======================================================= */
3276 
3277 /*
3278  * We go to some trouble to avoid taking the same lock during normal
3279  * transmit processing as we do during normal receive processing.
3280  *
3281  * Elements of the per-instance macinfo and per-stream gld_t structures
3282  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3283  * (Elements of the gld_mac_pvt_t structure are considered part of the
3284  * macinfo structure for purposes of this discussion).
3285  *
3286  * However, it is more complicated than that:
3287  *
3288  *	Elements of the macinfo structure that are set before the macinfo
3289  *	structure is added to its device list by gld_register(), and never
3290  *	thereafter modified, are accessed without requiring taking the lock.
3291  *	A similar rule applies to those elements of the gld_t structure that
3292  *	are written by gld_open() before the stream is added to any list.
3293  *
3294  *	Most other elements of the macinfo structure may only be read or
3295  *	written while holding the maclock.
3296  *
3297  *	Most writable elements of the gld_t structure are written only
3298  *	within the single-threaded domain of wsrv() and subsidiaries.
3299  *	(This domain includes open/close while qprocs are not on.)
3300  *	The maclock need not be taken while within that domain
3301  *	simply to read those elements.  Writing to them, even within
3302  *	that domain, or reading from it outside that domain, requires
3303  *	holding the maclock.  Exception:  if the stream is not
3304  *	presently attached to a PPA, there is no associated macinfo,
3305  *	and no maclock need be taken.
3306  *
3307  *	The curr_macaddr element of the mac private structure is also
3308  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3309  *      of that structure. However, there are a few instances in the
3310  *      transmit path where we choose to forgo lock protection when
3311  *      reading this variable. This is to avoid lock contention between
3312  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3313  *      In doing so we take a small risk of a few corrupted packets
3314  *      during the short and rare times when someone is changing the interface's
3315  *      physical address. We consider the small cost in this rare case to be
3316  *      worth the benefit of reduced lock contention under normal operating
3317  *      conditions. The risk/cost is small because:
3318  *          1. there is no guarantee at this layer of uncorrupted delivery.
3319  *          2. the physaddr doesn't change very often - no performance hit.
3320  *          3. if the physaddr changes, other stuff is going to be screwed
3321  *             up for a while anyway, while other sites refigure ARP, etc.,
3322  *             so losing a couple of packets is the least of our worries.
3323  *
3324  *	The list of streams associated with a macinfo is protected by
3325  *	two locks:  the per-macinfo maclock, and the per-major-device
3326  *	gld_devlock.  Both must be held to modify the list, but either
3327  *	may be held to protect the list during reading/traversing.  This
3328  *	allows independent locking for multiple instances in the receive
3329  *	path (using macinfo), while facilitating routines that must search
3330  *	the entire set of streams associated with a major device, such as
3331  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3332  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3333  *	protected, since they change at exactly the same time macinfo
3334  *	streams list does.
3335  *
3336  *	The list of macinfo structures associated with a major device
3337  *	structure is protected by the gld_devlock, as is the per-major
3338  *	list of Style 2 streams in the DL_UNATTACHED state.
3339  *
3340  *	The list of major devices is kept on a module-global list
3341  *	gld_device_list, which has its own lock to protect the list.
3342  *
3343  *	When it is necessary to hold more than one lock at a time, they
3344  *	are acquired in this "outside in" order:
3345  *		gld_device_list.gld_devlock
3346  *		glddev->gld_devlock
3347  *		GLDM_LOCK(macinfo)
3348  *
3349  *	Finally, there are some "volatile" elements of the gld_t structure
3350  *	used for synchronization between various routines that don't share
3351  *	the same mutexes.  See the routines for details.  These are:
3352  *		gld_xwait	between gld_wsrv() and gld_sched()
3353  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3354  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3355  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3356  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3357  *				(used in conjunction with q->q_first)
3358  */
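
/*
 * The "outside in" ordering above can be summarized by the following
 * minimal sketch (illustrative only, not compiled; it assumes a caller
 * that already has a glddev and a macinfo in hand and elides the actual
 * work done under the locks):
 */
#if 0
static void
example_lock_order(glddev_t *glddev, gld_mac_info_t *macinfo)
{
	mutex_enter(&gld_device_list.gld_devlock);	/* 1: module-global */
	mutex_enter(&glddev->gld_devlock);		/* 2: per-major */
	GLDM_LOCK(macinfo, RW_WRITER);			/* 3: per-instance */

	/* ... modify the per-major and per-macinfo stream lists ... */

	GLDM_UNLOCK(macinfo);
	mutex_exit(&glddev->gld_devlock);
	mutex_exit(&gld_device_list.gld_devlock);
}
#endif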
3359 
3360 /*
3361  * gld_ioctl (q, mp)
3362  * handles all ioctl requests passed downstream. This routine is
3363  * passed a pointer to the message block with the ioctl request in it, and a
3364  * pointer to the queue so it can respond to the ioctl request with an ack.
3365  */
3366 int
3367 gld_ioctl(queue_t *q, mblk_t *mp)
3368 {
3369 	struct iocblk *iocp;
3370 	gld_t *gld;
3371 	gld_mac_info_t *macinfo;
3372 
3373 #ifdef GLD_DEBUG
3374 	if (gld_debug & GLDTRACE)
3375 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3376 #endif
3377 	gld = (gld_t *)q->q_ptr;
3378 	iocp = (struct iocblk *)mp->b_rptr;
3379 	switch (iocp->ioc_cmd) {
3380 	case DLIOCRAW:		/* raw M_DATA mode */
3381 		gld->gld_flags |= GLD_RAW;
3382 		DB_TYPE(mp) = M_IOCACK;
3383 		qreply(q, mp);
3384 		break;
3385 
3386 	case DL_IOC_HDR_INFO:	/* fastpath */
3387 		/*
3388 		 * DL_IOC_HDR_INFO should only come from IP. The one
3389 		 * initiated from user-land should not be allowed.
3390 		 */
3391 		if ((gld_global_options & GLD_OPT_NO_FASTPATH) ||
3392 		    (iocp->ioc_cr != kcred)) {
3393 			miocnak(q, mp, 0, EINVAL);
3394 			break;
3395 		}
3396 		gld_fastpath(gld, q, mp);
3397 		break;
3398 
3399 	default:
3400 		macinfo	 = gld->gld_mac_info;
3401 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3402 			miocnak(q, mp, 0, EINVAL);
3403 			break;
3404 		}
3405 
3406 		GLDM_LOCK(macinfo, RW_WRITER);
3407 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3408 		GLDM_UNLOCK(macinfo);
3409 		break;
3410 	}
3411 	return (0);
3412 }
3413 
3414 /*
3415  * Since the rules for "fastpath" mode don't seem to be documented
3416  * anywhere, I will describe GLD's rules for fastpath users here:
3417  *
3418  * Once in this mode you remain there until close.
3419  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3420  * You must be bound (DL_IDLE) to transmit.
3421  * There are other rules not listed above.
3422  */
3423 static void
3424 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3425 {
3426 	gld_interface_t *ifp;
3427 	gld_mac_info_t *macinfo;
3428 	dl_unitdata_req_t *dludp;
3429 	mblk_t *nmp;
3430 	t_scalar_t off, len;
3431 	uint_t maclen;
3432 	int error;
3433 
3434 	if (gld->gld_state != DL_IDLE) {
3435 		miocnak(q, mp, 0, EINVAL);
3436 		return;
3437 	}
3438 
3439 	macinfo = gld->gld_mac_info;
3440 	ASSERT(macinfo != NULL);
3441 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3442 
3443 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3444 	if (error != 0) {
3445 		miocnak(q, mp, 0, error);
3446 		return;
3447 	}
3448 
3449 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3450 	off = dludp->dl_dest_addr_offset;
3451 	len = dludp->dl_dest_addr_length;
3452 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3453 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3454 		miocnak(q, mp, 0, EINVAL);
3455 		return;
3456 	}
3457 
3458 	/*
3459 	 * We take his fastpath request as a declaration that he will accept
3460 	 * M_DATA messages from us, whether or not we are willing to accept
3461 	 * them from him.  This allows us to have fastpath in one direction
3462 	 * (flow upstream) even on media with Source Routing, where we are
3463 	 * unable to provide a fixed MAC header to be prepended to downstream
3464 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3465 	 * allow him to send M_DATA down to us.
3466 	 */
3467 	GLDM_LOCK(macinfo, RW_WRITER);
3468 	gld->gld_flags |= GLD_FAST;
3469 	GLDM_UNLOCK(macinfo);
3470 
3471 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3472 
3473 	/* This will fail for Source Routing media */
3474 	/* Also on Ethernet on 802.2 SAPs */
3475 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3476 		miocnak(q, mp, 0, ENOMEM);
3477 		return;
3478 	}
3479 
3480 	/*
3481 	 * Link new mblk in after the "request" mblks.
3482 	 */
3483 	linkb(mp, nmp);
3484 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3485 }
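
/*
 * Once the exchange above has succeeded, a fastpath consumer typically
 * caches the M_DATA header template that gld_fastpath() linked after the
 * request and prepends a copy of it to each outbound payload.  A minimal
 * kernel-consumer sketch (illustrative only, not compiled; the function
 * and parameter names are hypothetical and error paths are simplified):
 */
#if 0
static void
example_fastpath_send(queue_t *wq, mblk_t *cached_hdr, mblk_t *payload)
{
	mblk_t *mp;

	if ((mp = copyb(cached_hdr)) == NULL) {
		freemsg(payload);
		return;
	}
	mp->b_cont = payload;		/* MAC header + data, all M_DATA */
	putnext(wq, mp);		/* GLD_FAST streams accept M_DATA */
}
#endif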
3486 
3487 /*
3488  * gld_cmds (q, mp)
3489  *	process the DL commands as defined in dlpi.h
3490  *	note that the primitives return status which is passed back
3491  *	to the service procedure.  If the value is GLDE_RETRY, then
3492  *	it is assumed that processing must stop and the primitive has
3493  *	been put back onto the queue.  If the value is any other error,
3494  *	then an error ack is generated by the service procedure.
3495  */
3496 static int
3497 gld_cmds(queue_t *q, mblk_t *mp)
3498 {
3499 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3500 	gld_t *gld = (gld_t *)(q->q_ptr);
3501 	int result = DL_BADPRIM;
3502 	int mblkl = MBLKL(mp);
3503 	t_uscalar_t dlreq;
3504 
3505 	/* Make sure we have at least dlp->dl_primitive */
3506 	if (mblkl < sizeof (dlp->dl_primitive))
3507 		return (DL_BADPRIM);
3508 
3509 	dlreq = dlp->dl_primitive;
3510 #ifdef	GLD_DEBUG
3511 	if (gld_debug & GLDTRACE)
3512 		cmn_err(CE_NOTE,
3513 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3514 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3515 #endif
3516 
3517 	switch (dlreq) {
3518 	case DL_UDQOS_REQ:
3519 		if (mblkl < DL_UDQOS_REQ_SIZE)
3520 			break;
3521 		result = gld_udqos(q, mp);
3522 		break;
3523 
3524 	case DL_BIND_REQ:
3525 		if (mblkl < DL_BIND_REQ_SIZE)
3526 			break;
3527 		result = gld_bind(q, mp);
3528 		break;
3529 
3530 	case DL_UNBIND_REQ:
3531 		if (mblkl < DL_UNBIND_REQ_SIZE)
3532 			break;
3533 		result = gld_unbind(q, mp);
3534 		break;
3535 
3536 	case DL_UNITDATA_REQ:
3537 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3538 			break;
3539 		result = gld_unitdata(q, mp);
3540 		break;
3541 
3542 	case DL_INFO_REQ:
3543 		if (mblkl < DL_INFO_REQ_SIZE)
3544 			break;
3545 		result = gld_inforeq(q, mp);
3546 		break;
3547 
3548 	case DL_ATTACH_REQ:
3549 		if (mblkl < DL_ATTACH_REQ_SIZE)
3550 			break;
3551 		if (gld->gld_style == DL_STYLE2)
3552 			result = gldattach(q, mp);
3553 		else
3554 			result = DL_NOTSUPPORTED;
3555 		break;
3556 
3557 	case DL_DETACH_REQ:
3558 		if (mblkl < DL_DETACH_REQ_SIZE)
3559 			break;
3560 		if (gld->gld_style == DL_STYLE2)
3561 			result = gldunattach(q, mp);
3562 		else
3563 			result = DL_NOTSUPPORTED;
3564 		break;
3565 
3566 	case DL_ENABMULTI_REQ:
3567 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3568 			break;
3569 		result = gld_enable_multi(q, mp);
3570 		break;
3571 
3572 	case DL_DISABMULTI_REQ:
3573 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3574 			break;
3575 		result = gld_disable_multi(q, mp);
3576 		break;
3577 
3578 	case DL_PHYS_ADDR_REQ:
3579 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3580 			break;
3581 		result = gld_physaddr(q, mp);
3582 		break;
3583 
3584 	case DL_SET_PHYS_ADDR_REQ:
3585 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3586 			break;
3587 		result = gld_setaddr(q, mp);
3588 		break;
3589 
3590 	case DL_PROMISCON_REQ:
3591 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3592 			break;
3593 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3594 		break;
3595 
3596 	case DL_PROMISCOFF_REQ:
3597 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3598 			break;
3599 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3600 		break;
3601 
3602 	case DL_GET_STATISTICS_REQ:
3603 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3604 			break;
3605 		result = gld_get_statistics(q, mp);
3606 		break;
3607 
3608 	case DL_CAPABILITY_REQ:
3609 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3610 			break;
3611 		result = gld_cap(q, mp);
3612 		break;
3613 
3614 	case DL_NOTIFY_REQ:
3615 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3616 			break;
3617 		result = gld_notify_req(q, mp);
3618 		break;
3619 
3620 	case DL_XID_REQ:
3621 	case DL_XID_RES:
3622 	case DL_TEST_REQ:
3623 	case DL_TEST_RES:
3624 	case DL_CONTROL_REQ:
3625 	case DL_PASSIVE_REQ:
3626 		result = DL_NOTSUPPORTED;
3627 		break;
3628 
3629 	default:
3630 #ifdef	GLD_DEBUG
3631 		if (gld_debug & GLDERRS)
3632 			cmn_err(CE_WARN,
3633 			    "gld_cmds: unknown M_PROTO message: %d",
3634 			    dlreq);
3635 #endif
3636 		result = DL_BADPRIM;
3637 	}
3638 
3639 	return (result);
3640 }
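
/*
 * The return-status contract described above, as seen from a wsrv-style
 * caller (a simplified, illustrative sketch only, not compiled; the real
 * gld_wsrv() path does considerably more):
 */
#if 0
static void
example_dispatch(queue_t *q, mblk_t *mp)
{
	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
	t_uscalar_t prim = dlp->dl_primitive;
	int result = gld_cmds(q, mp);

	if (result == GLDE_RETRY)
		return;		/* primitive was put back on q; stop here */
	if (result != GLDE_OK)
		dlerrorack(q, mp, prim, result, 0);
	/* GLDE_OK: gld_cmds() or a subsidiary has already replied */
}
#endif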
3641 
3642 static int
3643 gld_cap(queue_t *q, mblk_t *mp)
3644 {
3645 	gld_t *gld = (gld_t *)q->q_ptr;
3646 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3647 
3648 	if (gld->gld_state == DL_UNATTACHED)
3649 		return (DL_OUTSTATE);
3650 
3651 	if (dlp->dl_sub_length == 0)
3652 		return (gld_cap_ack(q, mp));
3653 
3654 	return (gld_cap_enable(q, mp));
3655 }
3656 
3657 static int
3658 gld_cap_ack(queue_t *q, mblk_t *mp)
3659 {
3660 	gld_t *gld = (gld_t *)q->q_ptr;
3661 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3662 	gld_interface_t *ifp;
3663 	dl_capability_ack_t *dlap;
3664 	dl_capability_sub_t *dlsp;
3665 	size_t size = sizeof (dl_capability_ack_t);
3666 	size_t subsize = 0;
3667 
3668 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3669 
3670 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3671 		subsize += sizeof (dl_capability_sub_t) +
3672 		    sizeof (dl_capab_hcksum_t);
3673 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3674 		subsize += sizeof (dl_capability_sub_t) +
3675 		    sizeof (dl_capab_zerocopy_t);
3676 	if (macinfo->gldm_options & GLDOPT_MDT)
3677 		subsize += (sizeof (dl_capability_sub_t) +
3678 		    sizeof (dl_capab_mdt_t));
3679 
3680 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3681 	    DL_CAPABILITY_ACK)) == NULL)
3682 		return (GLDE_OK);
3683 
3684 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3685 	dlap->dl_sub_offset = 0;
3686 	if ((dlap->dl_sub_length = subsize) != 0)
3687 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3688 	dlsp = (dl_capability_sub_t *)&dlap[1];
3689 
3690 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3691 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3692 
3693 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3694 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3695 
3696 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3697 
3698 		dlhp->hcksum_txflags = 0;
3699 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3700 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3701 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3702 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3703 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3704 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3705 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3706 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3707 
3708 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3709 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3710 	}
3711 
3712 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3713 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3714 
3715 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3716 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3717 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3718 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3719 
3720 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3721 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3722 	}
3723 
3724 	if (macinfo->gldm_options & GLDOPT_MDT) {
3725 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3726 
3727 		dlsp->dl_cap = DL_CAPAB_MDT;
3728 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3729 
3730 		dlmp->mdt_version = MDT_VERSION_2;
3731 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3732 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3733 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3734 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3735 		dlmp->mdt_hdr_head = ifp->hdr_size;
3736 		dlmp->mdt_hdr_tail = 0;
3737 	}
3738 
3739 	qreply(q, mp);
3740 	return (GLDE_OK);
3741 }
3742 
3743 static int
3744 gld_cap_enable(queue_t *q, mblk_t *mp)
3745 {
3746 	dl_capability_req_t *dlp;
3747 	dl_capability_sub_t *dlsp;
3748 	dl_capab_hcksum_t *dlhp;
3749 	offset_t off;
3750 	size_t len;
3751 	size_t size;
3752 	offset_t end;
3753 
3754 	dlp = (dl_capability_req_t *)mp->b_rptr;
3755 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3756 
3757 	off = dlp->dl_sub_offset;
3758 	len = dlp->dl_sub_length;
3759 
3760 	if (!MBLKIN(mp, off, len))
3761 		return (DL_BADPRIM);
3762 
3763 	end = off + len;
3764 	while (off < end) {
3765 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3766 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3767 		if (off + size > end)
3768 			return (DL_BADPRIM);
3769 
3770 		switch (dlsp->dl_cap) {
3771 		case DL_CAPAB_HCKSUM:
3772 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3773 			/* nothing useful we can do with the contents */
3774 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3775 			break;
3776 		default:
3777 			break;
3778 		}
3779 
3780 		off += size;
3781 	}
3782 
3783 	qreply(q, mp);
3784 	return (GLDE_OK);
3785 }
3786 
3787 /*
3788  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3789  * requested the specific <notification> that the message carries AND is
3790  * eligible and ready to receive the notification immediately.
3791  *
3792  * This routine ignores flow control. Notifications will be sent regardless.
3793  *
3794  * In all cases, the original message passed in is freed at the end of
3795  * the routine.
3796  */
3797 static void
3798 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3799 {
3800 	gld_mac_pvt_t *mac_pvt;
3801 	gld_vlan_t *vlan;
3802 	gld_t *gld;
3803 	mblk_t *nmp;
3804 	int i;
3805 
3806 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3807 
3808 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3809 
3810 	/*
3811 	 * Search all the streams attached to this macinfo looking
3812 	 * for those eligible to receive the present notification.
3813 	 */
3814 	for (i = 0; i < VLAN_HASHSZ; i++) {
3815 		for (vlan = mac_pvt->vlan_hash[i];
3816 		    vlan != NULL; vlan = vlan->gldv_next) {
3817 			for (gld = vlan->gldv_str_next;
3818 			    gld != (gld_t *)&vlan->gldv_str_next;
3819 			    gld = gld->gld_next) {
3820 				ASSERT(gld->gld_qptr != NULL);
3821 				ASSERT(gld->gld_state == DL_IDLE ||
3822 				    gld->gld_state == DL_UNBOUND);
3823 				ASSERT(gld->gld_mac_info == macinfo);
3824 
3825 				if (gld->gld_flags & GLD_STR_CLOSING)
3826 					continue; /* not eligible - skip */
3827 				if (!(notification & gld->gld_notifications))
3828 					continue; /* not wanted - skip */
3829 				if ((nmp = dupmsg(mp)) == NULL)
3830 					continue; /* can't copy - skip */
3831 
3832 				/*
3833 				 * All OK; send dup'd notification up this
3834 				 * stream
3835 				 */
3836 				qreply(WR(gld->gld_qptr), nmp);
3837 			}
3838 		}
3839 	}
3840 
3841 	/*
3842 	 * Drop the original message block now
3843 	 */
3844 	freemsg(mp);
3845 }
3846 
3847 /*
3848  * For each (understood) bit in the <notifications> argument, construct
3849  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3850  * eligible queues if <q> is NULL.
3851  */
3852 static void
3853 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3854 {
3855 	gld_mac_pvt_t *mac_pvt;
3856 	dl_notify_ind_t *dlnip;
3857 	struct gld_stats *stats;
3858 	mblk_t *mp;
3859 	size_t size;
3860 	uint32_t bit;
3861 
3862 	GLDM_LOCK(macinfo, RW_WRITER);
3863 
3864 	/*
3865 	 * The following cases shouldn't happen, but just in case the
3866 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3867 	 * check anyway ...
3868 	 */
3869 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3870 		GLDM_UNLOCK(macinfo);
3871 		return;				/* not ready yet	*/
3872 	}
3873 
3874 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3875 		GLDM_UNLOCK(macinfo);
3876 		return;				/* not ready anymore	*/
3877 	}
3878 
3879 	/*
3880 	 * Make sure the kstats are up to date, 'cos we use some of
3881 	 * the kstat values below, specifically the link speed ...
3882 	 */
3883 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3884 	stats = mac_pvt->statistics;
3885 	if (macinfo->gldm_get_stats)
3886 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3887 
3888 	for (bit = 1; notifications != 0; bit <<= 1) {
3889 		if ((notifications & bit) == 0)
3890 			continue;
3891 		notifications &= ~bit;
3892 
3893 		size = DL_NOTIFY_IND_SIZE;
3894 		if (bit == DL_NOTE_PHYS_ADDR)
3895 			size += macinfo->gldm_addrlen;
3896 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3897 			continue;
3898 
3899 		mp->b_datap->db_type = M_PROTO;
3900 		mp->b_wptr = mp->b_rptr + size;
3901 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3902 		dlnip->dl_primitive = DL_NOTIFY_IND;
3903 		dlnip->dl_notification = 0;
3904 		dlnip->dl_data = 0;
3905 		dlnip->dl_addr_length = 0;
3906 		dlnip->dl_addr_offset = 0;
3907 
3908 		switch (bit) {
3909 		case DL_NOTE_PROMISC_ON_PHYS:
3910 		case DL_NOTE_PROMISC_OFF_PHYS:
3911 			if (mac_pvt->nprom != 0)
3912 				dlnip->dl_notification = bit;
3913 			break;
3914 
3915 		case DL_NOTE_LINK_DOWN:
3916 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3917 				dlnip->dl_notification = bit;
3918 			break;
3919 
3920 		case DL_NOTE_LINK_UP:
3921 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3922 				dlnip->dl_notification = bit;
3923 			break;
3924 
3925 		case DL_NOTE_SPEED:
3926 			/*
3927 			 * Conversion required here:
3928 			 *	GLD keeps the speed in bit/s in a uint64
3929 			 *	DLPI wants it in kb/s in a uint32
3930 			 * Fortunately this is still big enough for 10Gb/s!
3931 			 */
3932 			dlnip->dl_notification = bit;
3933 			dlnip->dl_data = stats->glds_speed/1000ULL;
3934 			break;
3935 
3936 		case DL_NOTE_PHYS_ADDR:
3937 			dlnip->dl_notification = bit;
3938 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3939 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3940 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3941 			    abs(macinfo->gldm_saplen);
3942 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3943 			mac_copy(mac_pvt->curr_macaddr,
3944 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3945 			    macinfo->gldm_addrlen);
3946 			break;
3947 
3948 		default:
3949 			break;
3950 		}
3951 
3952 		if (dlnip->dl_notification == 0)
3953 			freemsg(mp);
3954 		else if (q != NULL)
3955 			qreply(q, mp);
3956 		else
3957 			gld_notify_qs(macinfo, mp, bit);
3958 	}
3959 
3960 	GLDM_UNLOCK(macinfo);
3961 }
3962 
3963 /*
3964  * gld_notify_req - handle a DL_NOTIFY_REQ message
3965  */
3966 static int
3967 gld_notify_req(queue_t *q, mblk_t *mp)
3968 {
3969 	gld_t *gld = (gld_t *)q->q_ptr;
3970 	gld_mac_info_t *macinfo;
3971 	gld_mac_pvt_t *pvt;
3972 	dl_notify_req_t *dlnrp;
3973 	dl_notify_ack_t *dlnap;
3974 
3975 	ASSERT(gld != NULL);
3976 	ASSERT(gld->gld_qptr == RD(q));
3977 
3978 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3979 
3980 #ifdef GLD_DEBUG
3981 	if (gld_debug & GLDTRACE)
3982 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3983 			(void *)q, (void *)mp);
3984 #endif
3985 
3986 	if (gld->gld_state == DL_UNATTACHED) {
3987 #ifdef GLD_DEBUG
3988 		if (gld_debug & GLDERRS)
3989 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3990 				gld->gld_state);
3991 #endif
3992 		return (DL_OUTSTATE);
3993 	}
3994 
3995 	/*
3996 	 * Remember what notifications are required by this stream
3997 	 */
3998 	macinfo = gld->gld_mac_info;
3999 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4000 
4001 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
4002 
4003 	/*
4004 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
4005 	 * that this driver can provide, independently of which ones have
4006 	 * previously been or are now being requested.
4007 	 */
4008 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
4009 	    DL_NOTIFY_ACK)) == NULL)
4010 		return (DL_SYSERR);
4011 
4012 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
4013 	dlnap->dl_notifications = pvt->notifications;
4014 	qreply(q, mp);
4015 
4016 	/*
4017 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
4018  * reply, the requestor gets zero or more DL_NOTIFY_IND messages
4019 	 * that provide the current status.
4020 	 */
4021 	gld_notify_ind(macinfo, gld->gld_notifications, q);
4022 
4023 	return (GLDE_OK);
4024 }
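
/*
 * From the client side the exchange above is: send a DL_NOTIFY_REQ naming
 * the events of interest, read back the DL_NOTIFY_ACK, then expect zero or
 * more DL_NOTIFY_IND messages reporting current state.  A minimal
 * user-level sketch (illustrative only, not compiled here; it assumes an
 * already attached and bound DLPI stream on descriptor "fd"):
 */
#if 0
#include <sys/dlpi.h>
#include <stropts.h>
#include <string.h>

static int
example_notify_req(int fd)
{
	dl_notify_req_t req;
	struct strbuf ctl;

	(void) memset(&req, 0, sizeof (req));
	req.dl_primitive = DL_NOTIFY_REQ;
	req.dl_notifications = DL_NOTE_LINK_UP | DL_NOTE_LINK_DOWN |
	    DL_NOTE_SPEED | DL_NOTE_PHYS_ADDR;

	(void) memset(&ctl, 0, sizeof (ctl));
	ctl.buf = (char *)&req;
	ctl.len = sizeof (req);
	return (putmsg(fd, &ctl, NULL, 0));
	/* next: getmsg() the DL_NOTIFY_ACK, then watch for DL_NOTIFY_INDs */
}
#endif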
4025 
4026 /*
4027  * gld_linkstate()
4028  *	Called by driver to tell GLD the state of the physical link.
4029  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
4030  *	notification to each client that has previously requested such
4031  *	notifications
4032  */
4033 void
4034 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
4035 {
4036 	uint32_t notification;
4037 
4038 	switch (newstate) {
4039 	default:
4040 		return;
4041 
4042 	case GLD_LINKSTATE_DOWN:
4043 		notification = DL_NOTE_LINK_DOWN;
4044 		break;
4045 
4046 	case GLD_LINKSTATE_UP:
4047 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
4048 		break;
4049 
4050 	case GLD_LINKSTATE_UNKNOWN:
4051 		notification = 0;
4052 		break;
4053 	}
4054 
4055 	GLDM_LOCK(macinfo, RW_WRITER);
4056 	if (macinfo->gldm_linkstate == newstate)
4057 		notification = 0;
4058 	else
4059 		macinfo->gldm_linkstate = newstate;
4060 	GLDM_UNLOCK(macinfo);
4061 
4062 	if (notification)
4063 		gld_notify_ind(macinfo, notification, NULL);
4064 }
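
/*
 * A GLD-based driver normally calls gld_linkstate() from its link-change
 * interrupt or link polling path.  A minimal driver-side sketch
 * (illustrative only, not compiled; the "xx" soft state, its fields and
 * xx_mii_linkup() are hypothetical):
 */
#if 0
static void
xx_link_check(struct xx_softc *xxp)
{
	int32_t state = xx_mii_linkup(xxp) ?
	    GLD_LINKSTATE_UP : GLD_LINKSTATE_DOWN;

	/* gld_linkstate() itself suppresses duplicate notifications */
	gld_linkstate(xxp->xx_macinfo, state);
}
#endif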
4065 
4066 /*
4067  * gld_udqos - set the current QoS parameters (priority only at the moment).
4068  */
4069 static int
4070 gld_udqos(queue_t *q, mblk_t *mp)
4071 {
4072 	dl_udqos_req_t *dlp;
4073 	gld_t  *gld = (gld_t *)q->q_ptr;
4074 	int off;
4075 	int len;
4076 	dl_qos_cl_sel1_t *selp;
4077 
4078 	ASSERT(gld);
4079 	ASSERT(gld->gld_qptr == RD(q));
4080 
4081 #ifdef GLD_DEBUG
4082 	if (gld_debug & GLDTRACE)
4083 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
4084 #endif
4085 
4086 	if (gld->gld_state != DL_IDLE) {
4087 #ifdef GLD_DEBUG
4088 		if (gld_debug & GLDERRS)
4089 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
4090 			    gld->gld_state);
4091 #endif
4092 		return (DL_OUTSTATE);
4093 	}
4094 
4095 	dlp = (dl_udqos_req_t *)mp->b_rptr;
4096 	off = dlp->dl_qos_offset;
4097 	len = dlp->dl_qos_length;
4098 
4099 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
4100 		return (DL_BADQOSTYPE);
4101 
4102 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
4103 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
4104 		return (DL_BADQOSTYPE);
4105 
4106 	if (selp->dl_trans_delay != 0 &&
4107 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
4108 		return (DL_BADQOSPARAM);
4109 	if (selp->dl_protection != 0 &&
4110 	    selp->dl_protection != DL_QOS_DONT_CARE)
4111 		return (DL_BADQOSPARAM);
4112 	if (selp->dl_residual_error != 0 &&
4113 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
4114 		return (DL_BADQOSPARAM);
4115 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
4116 		return (DL_BADQOSPARAM);
4117 
4118 	gld->gld_upri = selp->dl_priority;
4119 
4120 	dlokack(q, mp, DL_UDQOS_REQ);
4121 	return (GLDE_OK);
4122 }
4123 
4124 static mblk_t *
4125 gld_bindack(queue_t *q, mblk_t *mp)
4126 {
4127 	gld_t *gld = (gld_t *)q->q_ptr;
4128 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4129 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4130 	dl_bind_ack_t *dlp;
4131 	size_t size;
4132 	t_uscalar_t addrlen;
4133 	uchar_t *sapp;
4134 
4135 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
4136 	size = sizeof (dl_bind_ack_t) + addrlen;
4137 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
4138 		return (NULL);
4139 
4140 	dlp = (dl_bind_ack_t *)mp->b_rptr;
4141 	dlp->dl_sap = gld->gld_sap;
4142 	dlp->dl_addr_length = addrlen;
4143 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
4144 	dlp->dl_max_conind = 0;
4145 	dlp->dl_xidtest_flg = 0;
4146 
4147 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
4148 	    macinfo->gldm_addrlen);
4149 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
4150 	*(ushort_t *)sapp = gld->gld_sap;
4151 
4152 	return (mp);
4153 }
4154 
4155 /*
4156  * gld_bind - determine if a SAP is already allocated and whether it is legal
4157  * to do the bind at this time
4158  */
4159 static int
4160 gld_bind(queue_t *q, mblk_t *mp)
4161 {
4162 	ulong_t	sap;
4163 	dl_bind_req_t *dlp;
4164 	gld_t *gld = (gld_t *)q->q_ptr;
4165 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4166 
4167 	ASSERT(gld);
4168 	ASSERT(gld->gld_qptr == RD(q));
4169 
4170 #ifdef GLD_DEBUG
4171 	if (gld_debug & GLDTRACE)
4172 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
4173 #endif
4174 
4175 	dlp = (dl_bind_req_t *)mp->b_rptr;
4176 	sap = dlp->dl_sap;
4177 
4178 #ifdef GLD_DEBUG
4179 	if (gld_debug & GLDPROT)
4180 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
4181 #endif
4182 
4183 	if (gld->gld_state != DL_UNBOUND) {
4184 #ifdef GLD_DEBUG
4185 		if (gld_debug & GLDERRS)
4186 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
4187 				gld->gld_state);
4188 #endif
4189 		return (DL_OUTSTATE);
4190 	}
4191 	ASSERT(macinfo);
4192 
4193 	if (dlp->dl_service_mode != DL_CLDLS) {
4194 		return (DL_UNSUPPORTED);
4195 	}
4196 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
4197 		return (DL_NOAUTO);
4198 	}
4199 
4200 	/*
4201 	 * Check sap validity and decide whether this stream accepts
4202 	 * IEEE 802.2 (LLC) packets.
4203 	 */
4204 	if (sap > ETHERTYPE_MAX)
4205 		return (DL_BADSAP);
4206 
4207 	/*
4208 	 * Decide whether the SAP value selects EtherType encoding/decoding.
4209 	 * For compatibility with monolithic ethernet drivers, the range of
4210 	 * SAP values is different for DL_ETHER media.
4211 	 */
4212 	switch (macinfo->gldm_type) {
4213 	case DL_ETHER:
4214 		gld->gld_ethertype = (sap > ETHERMTU);
4215 		break;
4216 	default:
4217 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
4218 		break;
4219 	}
4220 
4221 	/* if we get to here, then the SAP is legal enough */
4222 	GLDM_LOCK(macinfo, RW_WRITER);
4223 	gld->gld_state = DL_IDLE;	/* bound and ready */
4224 	gld->gld_sap = sap;
4225 	if ((macinfo->gldm_type == DL_ETHER) && (sap == ETHERTYPE_VLAN))
4226 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap++;
4227 	gld_set_ipq(gld);
4228 
4229 #ifdef GLD_DEBUG
4230 	if (gld_debug & GLDPROT)
4231 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
4232 #endif
4233 
4234 	/* ACK the BIND */
4235 	mp = gld_bindack(q, mp);
4236 	GLDM_UNLOCK(macinfo);
4237 
4238 	if (mp != NULL) {
4239 		qreply(q, mp);
4240 		return (GLDE_OK);
4241 	}
4242 
4243 	return (DL_SYSERR);
4244 }
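
/*
 * Client view of the SAP rules enforced above: on DL_ETHER a SAP greater
 * than ETHERMTU (1500) selects EtherType encoding, while smaller values
 * are treated as IEEE 802.2 LLC SAPs.  A minimal user-level bind sketch
 * (illustrative only, not compiled here; the choice of the IP EtherType
 * and the already-attached descriptor "fd" are hypothetical):
 */
#if 0
#include <sys/dlpi.h>
#include <stropts.h>
#include <string.h>

static int
example_bind_ethertype(int fd)
{
	dl_bind_req_t req;
	struct strbuf ctl;

	(void) memset(&req, 0, sizeof (req));
	req.dl_primitive = DL_BIND_REQ;
	req.dl_sap = 0x0800;		/* > ETHERMTU, so EtherType mode */
	req.dl_service_mode = DL_CLDLS;	/* the only mode gld_bind() accepts */

	(void) memset(&ctl, 0, sizeof (ctl));
	ctl.buf = (char *)&req;
	ctl.len = sizeof (req);
	return (putmsg(fd, &ctl, NULL, 0));
	/* next: getmsg() the DL_BIND_ACK built by gld_bindack() */
}
#endif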
4245 
4246 /*
4247  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
4248  * The stream is still open and can be re-bound.
4249  */
4250 static int
4251 gld_unbind(queue_t *q, mblk_t *mp)
4252 {
4253 	gld_t *gld = (gld_t *)q->q_ptr;
4254 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4255 
4256 	ASSERT(gld);
4257 
4258 #ifdef GLD_DEBUG
4259 	if (gld_debug & GLDTRACE)
4260 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
4261 #endif
4262 
4263 	if (gld->gld_state != DL_IDLE) {
4264 #ifdef GLD_DEBUG
4265 		if (gld_debug & GLDERRS)
4266 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
4267 				gld->gld_state);
4268 #endif
4269 		return (DL_OUTSTATE);
4270 	}
4271 	ASSERT(macinfo);
4272 
4273 	/*
4274 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
4275 	 * See comments above gld_start().
4276 	 */
4277 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
4278 	membar_enter();
4279 	if (gld->gld_wput_count != 0) {
4280 		gld->gld_in_unbind = B_FALSE;
4281 		ASSERT(mp);		/* we didn't come from close */
4282 #ifdef GLD_DEBUG
4283 		if (gld_debug & GLDETRACE)
4284 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
4285 #endif
4286 		(void) putbq(q, mp);
4287 		qenable(q);		/* try again soon */
4288 		return (GLDE_RETRY);
4289 	}
4290 
4291 	GLDM_LOCK(macinfo, RW_WRITER);
4292 	if ((macinfo->gldm_type == DL_ETHER) &&
4293 	    (gld->gld_sap == ETHERTYPE_VLAN)) {
4294 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap--;
4295 	}
4296 	gld->gld_state = DL_UNBOUND;
4297 	gld->gld_sap = 0;
4298 	gld_set_ipq(gld);
4299 	GLDM_UNLOCK(macinfo);
4300 
4301 	membar_exit();
4302 	gld->gld_in_unbind = B_FALSE;
4303 
4304 	/* mp is NULL if we came from close */
4305 	if (mp) {
4306 		gld_flushqueue(q);	/* flush the queues */
4307 		dlokack(q, mp, DL_UNBIND_REQ);
4308 	}
4309 	return (GLDE_OK);
4310 }
4311 
4312 /*
4313  * gld_inforeq - generate the response to an info request
4314  */
4315 static int
4316 gld_inforeq(queue_t *q, mblk_t *mp)
4317 {
4318 	gld_t		*gld;
4319 	dl_info_ack_t	*dlp;
4320 	int		bufsize;
4321 	glddev_t	*glddev;
4322 	gld_mac_info_t	*macinfo;
4323 	gld_mac_pvt_t	*mac_pvt;
4324 	int		sel_offset = 0;
4325 	int		range_offset = 0;
4326 	int		addr_offset;
4327 	int		addr_length;
4328 	int		sap_length;
4329 	int		brdcst_offset;
4330 	int		brdcst_length;
4331 	uchar_t		*sapp;
4332 
4333 #ifdef GLD_DEBUG
4334 	if (gld_debug & GLDTRACE)
4335 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4336 #endif
4337 	gld = (gld_t *)q->q_ptr;
4338 	ASSERT(gld);
4339 	glddev = gld->gld_device;
4340 	ASSERT(glddev);
4341 
4342 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4343 		macinfo = gld->gld_mac_info;
4344 		ASSERT(macinfo != NULL);
4345 
4346 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4347 
4348 		addr_length = macinfo->gldm_addrlen;
4349 		sap_length = macinfo->gldm_saplen;
4350 		brdcst_length = macinfo->gldm_addrlen;
4351 	} else {
4352 		addr_length = glddev->gld_addrlen;
4353 		sap_length = glddev->gld_saplen;
4354 		brdcst_length = glddev->gld_addrlen;
4355 	}
4356 
4357 	bufsize = sizeof (dl_info_ack_t);
4358 
4359 	addr_offset = bufsize;
4360 	bufsize += addr_length;
4361 	bufsize += abs(sap_length);
4362 
4363 	brdcst_offset = bufsize;
4364 	bufsize += brdcst_length;
4365 
4366 	if (((gld_vlan_t *)gld->gld_vlan) != NULL) {
4367 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4368 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4369 
4370 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4371 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4372 	}
4373 
4374 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4375 		return (GLDE_OK);	/* nothing more to be done */
4376 
4377 	bzero(mp->b_rptr, bufsize);
4378 
4379 	dlp = (dl_info_ack_t *)mp->b_rptr;
4380 	dlp->dl_primitive = DL_INFO_ACK;
4381 	dlp->dl_version = DL_VERSION_2;
4382 	dlp->dl_service_mode = DL_CLDLS;
4383 	dlp->dl_current_state = gld->gld_state;
4384 	dlp->dl_provider_style = gld->gld_style;
4385 
4386 	if (sel_offset != 0) {
4387 		dl_qos_cl_sel1_t	*selp;
4388 		dl_qos_cl_range1_t	*rangep;
4389 
4390 		ASSERT(range_offset != 0);
4391 
4392 		dlp->dl_qos_offset = sel_offset;
4393 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4394 		dlp->dl_qos_range_offset = range_offset;
4395 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4396 
4397 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4398 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4399 		selp->dl_priority = gld->gld_upri;
4400 
4401 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4402 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4403 		rangep->dl_priority.dl_min = 0;
4404 		rangep->dl_priority.dl_max = 7;
4405 	}
4406 
4407 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4408 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4409 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4410 		dlp->dl_mac_type = macinfo->gldm_type;
4411 		dlp->dl_addr_length = addr_length + abs(sap_length);
4412 		dlp->dl_sap_length = sap_length;
4413 
4414 		if (gld->gld_state == DL_IDLE) {
4415 			/*
4416 			 * If we are bound to a non-LLC SAP on any medium
4417 			 * other than Ethernet, then we need room for a
4418 			 * SNAP header.  So we have to adjust the MTU size
4419 			 * accordingly.  XXX I suppose this should be done
4420 			 * in gldutil.c, but it seems likely that this will
4421 			 * always be true for everything GLD supports but
4422 			 * Ethernet.  Check this if you add another medium.
4423 			 */
4424 			if ((macinfo->gldm_type == DL_TPR ||
4425 			    macinfo->gldm_type == DL_FDDI) &&
4426 			    gld->gld_ethertype)
4427 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4428 
4429 			/* copy macaddr and sap */
4430 			dlp->dl_addr_offset = addr_offset;
4431 
4432 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4433 			    addr_offset, macinfo->gldm_addrlen);
4434 			sapp = mp->b_rptr + addr_offset +
4435 			    macinfo->gldm_addrlen;
4436 			*(ushort_t *)sapp = gld->gld_sap;
4437 		} else {
4438 			dlp->dl_addr_offset = 0;
4439 		}
4440 
4441 		/* copy broadcast addr */
4442 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4443 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4444 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4445 		    mp->b_rptr + brdcst_offset, brdcst_length);
4446 	} else {
4447 		/*
4448 		 * No PPA is attached.
4449 		 * The best we can do is use the values provided
4450 		 * by the first mac that called gld_register.
4451 		 */
4452 		dlp->dl_min_sdu = glddev->gld_minsdu;
4453 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4454 		dlp->dl_mac_type = glddev->gld_type;
4455 		dlp->dl_addr_length = addr_length + abs(sap_length);
4456 		dlp->dl_sap_length = sap_length;
4457 		dlp->dl_addr_offset = 0;
4458 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4459 		dlp->dl_brdcst_addr_length = brdcst_length;
4460 		mac_copy((caddr_t)glddev->gld_broadcast,
4461 		    mp->b_rptr + brdcst_offset, brdcst_length);
4462 	}
4463 	qreply(q, mp);
4464 	return (GLDE_OK);
4465 }
4466 
4467 /*
4468  * gld_unitdata (q, mp)
4469  * send a datagram.  Destination address/lsap is in M_PROTO
4470  * message (first mblock), data is in remainder of message.
4471  *
4472  */
4473 static int
4474 gld_unitdata(queue_t *q, mblk_t *mp)
4475 {
4476 	gld_t *gld = (gld_t *)q->q_ptr;
4477 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4478 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4479 	size_t	msglen;
4480 	mblk_t	*nmp;
4481 	gld_interface_t *ifp;
4482 	uint32_t start;
4483 	uint32_t stuff;
4484 	uint32_t end;
4485 	uint32_t value;
4486 	uint32_t flags;
4487 	uint32_t upri;
4488 
4489 #ifdef GLD_DEBUG
4490 	if (gld_debug & GLDTRACE)
4491 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4492 #endif
4493 
4494 	if (gld->gld_state != DL_IDLE) {
4495 #ifdef GLD_DEBUG
4496 		if (gld_debug & GLDERRS)
4497 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4498 				gld->gld_state);
4499 #endif
4500 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4501 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4502 		return (GLDE_OK);
4503 	}
4504 	ASSERT(macinfo != NULL);
4505 
4506 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4507 	    dlp->dl_dest_addr_length !=
4508 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4509 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4510 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4511 		return (GLDE_OK);
4512 	}
4513 
4514 	upri = dlp->dl_priority.dl_max;
4515 
4516 	msglen = msgdsize(mp);
4517 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4518 #ifdef GLD_DEBUG
4519 		if (gld_debug & GLDERRS)
4520 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4521 				(int)msglen);
4522 #endif
4523 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4524 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4525 		return (GLDE_OK);
4526 	}
4527 
4528 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4529 
4530 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4531 
4532 	/* grab any checksum information that may be present */
4533 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4534 	    &value, &flags);
4535 
4536 	/*
4537 	 * Prepend a valid header for transmission
4538 	 */
4539 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4540 #ifdef GLD_DEBUG
4541 		if (gld_debug & GLDERRS)
4542 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4543 #endif
4544 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4545 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4546 		return (GLDE_OK);
4547 	}
4548 
4549 	/* apply any checksum information to the first block in the chain */
4550 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4551 	    flags, 0);
4552 
4553 	GLD_CLEAR_MBLK_VTAG(nmp);
4554 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4555 		qenable(q);
4556 		return (GLDE_RETRY);
4557 	}
4558 
4559 	return (GLDE_OK);
4560 }
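
/*
 * The message shape gld_unitdata() expects, seen from a user-level sender:
 * the control part carries a dl_unitdata_req_t followed by the destination
 * address (MAC address plus a 16-bit SAP, matching the gldm_saplen == -2
 * convention used above), and the payload travels in the data part.  A
 * minimal sketch for a 6-byte MAC address (illustrative only, not compiled
 * here; the descriptor, destination and SAP are hypothetical and SAP byte
 * ordering is glossed over):
 */
#if 0
#include <sys/dlpi.h>
#include <stropts.h>
#include <string.h>

static int
example_unitdata(int fd, const unsigned char dst[6], unsigned short sap,
    void *payload, int paylen)
{
	struct {
		dl_unitdata_req_t req;
		unsigned char addr[6 + 2];	/* MAC address + 16-bit SAP */
	} msg;
	struct strbuf ctl, data;

	(void) memset(&msg, 0, sizeof (msg));
	msg.req.dl_primitive = DL_UNITDATA_REQ;
	msg.req.dl_dest_addr_offset = (char *)msg.addr - (char *)&msg;
	msg.req.dl_dest_addr_length = sizeof (msg.addr);
	(void) memcpy(msg.addr, dst, 6);
	(void) memcpy(msg.addr + 6, &sap, sizeof (sap));

	(void) memset(&ctl, 0, sizeof (ctl));
	ctl.buf = (char *)&msg;
	ctl.len = sizeof (msg);
	(void) memset(&data, 0, sizeof (data));
	data.buf = payload;
	data.len = paylen;
	return (putmsg(fd, &ctl, &data, 0));
}
#endif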
4561 
4562 /*
4563  * gldattach(q, mp)
4564  * DLPI DL_ATTACH_REQ
4565  * this attaches the stream to a PPA
4566  */
4567 static int
4568 gldattach(queue_t *q, mblk_t *mp)
4569 {
4570 	dl_attach_req_t *at;
4571 	gld_mac_info_t *macinfo;
4572 	gld_t  *gld = (gld_t *)q->q_ptr;
4573 	glddev_t *glddev;
4574 	gld_mac_pvt_t *mac_pvt;
4575 	uint32_t ppa;
4576 	uint32_t vid;
4577 	gld_vlan_t *vlan;
4578 
4579 	at = (dl_attach_req_t *)mp->b_rptr;
4580 
4581 	if (gld->gld_state != DL_UNATTACHED)
4582 		return (DL_OUTSTATE);
4583 
4584 	ASSERT(!gld->gld_mac_info);
4585 
4586 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4587 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4588 	if (vid > VLAN_VID_MAX)
4589 		return (DL_BADPPA);
4590 
4591 	glddev = gld->gld_device;
4592 	mutex_enter(&glddev->gld_devlock);
4593 	for (macinfo = glddev->gld_mac_next;
4594 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4595 	    macinfo = macinfo->gldm_next) {
4596 		int inst;
4597 
4598 		ASSERT(macinfo != NULL);
4599 		if (macinfo->gldm_ppa != ppa)
4600 			continue;
4601 
4602 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4603 			continue;	/* this one's not ready yet */
4604 
4605 		/*
4606 		 * VLAN sanity check
4607 		 */
4608 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4609 			mutex_exit(&glddev->gld_devlock);
4610 			return (DL_BADPPA);
4611 		}
4612 
4613 		/*
4614 		 * We found the correct PPA, hold the instance
4615 		 */
4616 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4617 		if (inst == -1 || qassociate(q, inst) != 0) {
4618 			mutex_exit(&glddev->gld_devlock);
4619 			return (DL_BADPPA);
4620 		}
4621 
4622 		/* Take the stream off the per-driver-class list */
4623 		gldremque(gld);
4624 
4625 		/*
4626 		 * We must hold the lock to prevent multiple calls
4627 		 * to the reset and start routines.
4628 		 */
4629 		GLDM_LOCK(macinfo, RW_WRITER);
4630 
4631 		gld->gld_mac_info = macinfo;
4632 
4633 		if (macinfo->gldm_send_tagged != NULL)
4634 			gld->gld_send = macinfo->gldm_send_tagged;
4635 		else
4636 			gld->gld_send = macinfo->gldm_send;
4637 
4638 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4639 			GLDM_UNLOCK(macinfo);
4640 			gldinsque(gld, glddev->gld_str_prev);
4641 			mutex_exit(&glddev->gld_devlock);
4642 			(void) qassociate(q, -1);
4643 			return (DL_BADPPA);
4644 		}
4645 
4646 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4647 		if (!mac_pvt->started) {
4648 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4649 				gld_rem_vlan(vlan);
4650 				GLDM_UNLOCK(macinfo);
4651 				gldinsque(gld, glddev->gld_str_prev);
4652 				mutex_exit(&glddev->gld_devlock);
4653 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4654 				    EIO);
4655 				(void) qassociate(q, -1);
4656 				return (GLDE_OK);
4657 			}
4658 		}
4659 
4660 		gld->gld_vlan = vlan;
4661 		vlan->gldv_nstreams++;
4662 		gldinsque(gld, vlan->gldv_str_prev);
4663 		gld->gld_state = DL_UNBOUND;
4664 		GLDM_UNLOCK(macinfo);
4665 
4666 #ifdef GLD_DEBUG
4667 		if (gld_debug & GLDPROT) {
4668 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4669 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4670 		}
4671 #endif
4672 		mutex_exit(&glddev->gld_devlock);
4673 		dlokack(q, mp, DL_ATTACH_REQ);
4674 		return (GLDE_OK);
4675 	}
4676 	mutex_exit(&glddev->gld_devlock);
4677 	return (DL_BADPPA);
4678 }
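
/*
 * The dl_ppa decoding above means a Style 2 client selects a VLAN by
 * scaling the VLAN id into the PPA it attaches to: with the 0..999
 * instance range shown (GLD_VLAN_SCALE == 1000), instance 3 of a
 * VLAN-capable device carrying VLAN id 2 is reached via dl_ppa 2003.
 * A hypothetical helper (illustrative only, not compiled):
 */
#if 0
static uint32_t
example_vlan_ppa(uint32_t instance, uint32_t vid)
{
	return (vid * GLD_VLAN_SCALE + instance);  /* instance 3, vid 2 => 2003 */
}
#endif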
4679 
4680 /*
4681  * gldunattach(q, mp)
4682  * DLPI DL_DETACH_REQ
4683  * detaches the mac layer from the stream
4684  */
4685 int
4686 gldunattach(queue_t *q, mblk_t *mp)
4687 {
4688 	gld_t  *gld = (gld_t *)q->q_ptr;
4689 	glddev_t *glddev = gld->gld_device;
4690 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4691 	int	state = gld->gld_state;
4692 	int	i;
4693 	gld_mac_pvt_t *mac_pvt;
4694 	gld_vlan_t *vlan;
4695 	boolean_t phys_off;
4696 	boolean_t mult_off;
4697 	int op = GLD_MAC_PROMISC_NOOP;
4698 
4699 	if (state != DL_UNBOUND)
4700 		return (DL_OUTSTATE);
4701 
4702 	ASSERT(macinfo != NULL);
4703 	ASSERT(gld->gld_sap == 0);
4704 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4705 
4706 #ifdef GLD_DEBUG
4707 	if (gld_debug & GLDPROT) {
4708 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4709 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4710 	}
4711 #endif
4712 
4713 	GLDM_LOCK(macinfo, RW_WRITER);
4714 
4715 	if (gld->gld_mcast) {
4716 		for (i = 0; i < gld->gld_multicnt; i++) {
4717 			gld_mcast_t *mcast;
4718 
4719 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4720 				ASSERT(mcast->gldm_refcnt);
4721 				gld_send_disable_multi(macinfo, mcast);
4722 			}
4723 		}
4724 		kmem_free(gld->gld_mcast,
4725 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4726 		gld->gld_mcast = NULL;
4727 		gld->gld_multicnt = 0;
4728 	}
4729 
4730 	/* decide if we need to turn off any promiscuity */
4731 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4732 	    --mac_pvt->nprom == 0);
4733 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4734 	    --mac_pvt->nprom_multi == 0);
4735 
4736 	if (phys_off) {
4737 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4738 		    GLD_MAC_PROMISC_MULTI;
4739 	} else if (mult_off) {
4740 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4741 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4742 	}
4743 
4744 	if (op != GLD_MAC_PROMISC_NOOP)
4745 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4746 
4747 	vlan = (gld_vlan_t *)gld->gld_vlan;
4748 	if (gld->gld_flags & GLD_PROM_PHYS)
4749 		vlan->gldv_nprom--;
4750 	if (gld->gld_flags & GLD_PROM_MULT)
4751 		vlan->gldv_nprom--;
4752 	if (gld->gld_flags & GLD_PROM_SAP) {
4753 		vlan->gldv_nprom--;
4754 		vlan->gldv_nvlan_sap--;
4755 	}
4756 
4757 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4758 
4759 	GLDM_UNLOCK(macinfo);
4760 
4761 	if (phys_off)
4762 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4763 
4764 	/*
4765 	 * We need to hold both locks when modifying the mac stream list
4766 	 * to protect findminor as well as everyone else.
4767 	 */
4768 	mutex_enter(&glddev->gld_devlock);
4769 	GLDM_LOCK(macinfo, RW_WRITER);
4770 
4771 	/* disassociate this stream with its vlan and underlying mac */
4772 	gldremque(gld);
4773 
4774 	if (--vlan->gldv_nstreams == 0) {
4775 		gld_rem_vlan(vlan);
4776 		gld->gld_vlan = NULL;
4777 	}
4778 
4779 	gld->gld_mac_info = NULL;
4780 	gld->gld_state = DL_UNATTACHED;
4781 
4782 	/* cleanup mac layer if last vlan */
4783 	if (mac_pvt->nvlan == 0) {
4784 		gld_stop_mac(macinfo);
4785 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4786 	}
4787 
4788 	/* make sure no references to this gld for gld_v0_sched */
4789 	if (mac_pvt->last_sched == gld)
4790 		mac_pvt->last_sched = NULL;
4791 
4792 	GLDM_UNLOCK(macinfo);
4793 
4794 	/* put the stream on the unattached Style 2 list */
4795 	gldinsque(gld, glddev->gld_str_prev);
4796 
4797 	mutex_exit(&glddev->gld_devlock);
4798 
4799 	/* There will be no mp if we were called from close */
4800 	if (mp) {
4801 		dlokack(q, mp, DL_DETACH_REQ);
4802 	}
4803 	if (gld->gld_style == DL_STYLE2)
4804 		(void) qassociate(q, -1);
4805 	return (GLDE_OK);
4806 }
4807 
4808 /*
4809  * gld_enable_multi (q, mp)
4810  * Enables multicast address on the stream.  If the mac layer
4811  * isn't enabled for this address, enable at that level as well.
4812  */
4813 static int
4814 gld_enable_multi(queue_t *q, mblk_t *mp)
4815 {
4816 	gld_t  *gld = (gld_t *)q->q_ptr;
4817 	glddev_t *glddev;
4818 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4819 	unsigned char *maddr;
4820 	dl_enabmulti_req_t *multi;
4821 	gld_mcast_t *mcast;
4822 	int	i, rc;
4823 	gld_mac_pvt_t *mac_pvt;
4824 
4825 #ifdef GLD_DEBUG
4826 	if (gld_debug & GLDPROT) {
4827 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4828 		    (void *)mp);
4829 	}
4830 #endif
4831 
4832 	if (gld->gld_state == DL_UNATTACHED)
4833 		return (DL_OUTSTATE);
4834 
4835 	ASSERT(macinfo != NULL);
4836 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4837 
4838 	if (macinfo->gldm_set_multicast == NULL) {
4839 		return (DL_UNSUPPORTED);
4840 	}
4841 
4842 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4843 
4844 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4845 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4846 		return (DL_BADADDR);
4847 
4848 	/* request appears to be valid */
4849 
4850 	glddev = mac_pvt->major_dev;
4851 	ASSERT(glddev == gld->gld_device);
4852 
4853 	maddr = mp->b_rptr + multi->dl_addr_offset;
4854 
4855 	/*
4856 	 * The multicast addresses live in a per-device table, along
4857 	 * with a reference count.  Each stream has a table that
4858 	 * points to entries in the device table, with the reference
4859 	 * count reflecting the number of streams pointing at it.  If
4860 	 * this multicast address is already in the per-device table,
4861 	 * all we have to do is point at it.
4862 	 */
4863 	GLDM_LOCK(macinfo, RW_WRITER);
4864 
4865 	/* does this address appear in current table? */
4866 	if (gld->gld_mcast == NULL) {
4867 		/* no mcast addresses -- allocate table */
4868 		gld->gld_mcast = GETSTRUCT(gld_mcast_t *,
4869 					    glddev->gld_multisize);
4870 		if (gld->gld_mcast == NULL) {
4871 			GLDM_UNLOCK(macinfo);
4872 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4873 			return (GLDE_OK);
4874 		}
4875 		gld->gld_multicnt = glddev->gld_multisize;
4876 	} else {
4877 		for (i = 0; i < gld->gld_multicnt; i++) {
4878 			if (gld->gld_mcast[i] &&
4879 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4880 				maddr, macinfo->gldm_addrlen)) {
4881 				/* this is a match -- just succeed */
4882 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4883 				GLDM_UNLOCK(macinfo);
4884 				dlokack(q, mp, DL_ENABMULTI_REQ);
4885 				return (GLDE_OK);
4886 			}
4887 		}
4888 	}
4889 
4890 	/*
4891 	 * it wasn't in the stream so check to see if the mac layer has it
4892 	 */
4893 	mcast = NULL;
4894 	if (mac_pvt->mcast_table == NULL) {
4895 		mac_pvt->mcast_table = GETSTRUCT(gld_mcast_t,
4896 						glddev->gld_multisize);
4897 		if (mac_pvt->mcast_table == NULL) {
4898 			GLDM_UNLOCK(macinfo);
4899 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4900 			return (GLDE_OK);
4901 		}
4902 	} else {
4903 		for (i = 0; i < glddev->gld_multisize; i++) {
4904 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4905 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4906 			    maddr, macinfo->gldm_addrlen)) {
4907 				mcast = &mac_pvt->mcast_table[i];
4908 				break;
4909 			}
4910 		}
4911 	}
4912 	if (mcast == NULL) {
4913 		/* not in mac layer -- find an empty mac slot to fill in */
4914 		for (i = 0; i < glddev->gld_multisize; i++) {
4915 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4916 				mcast = &mac_pvt->mcast_table[i];
4917 				mac_copy(maddr, mcast->gldm_addr,
4918 				    macinfo->gldm_addrlen);
4919 				break;
4920 			}
4921 		}
4922 	}
4923 	if (mcast == NULL) {
4924 		/* couldn't get a mac layer slot */
4925 		GLDM_UNLOCK(macinfo);
4926 		return (DL_TOOMANY);
4927 	}
4928 
4929 	/* now we have a mac layer slot in mcast -- get a stream slot */
4930 	for (i = 0; i < gld->gld_multicnt; i++) {
4931 		if (gld->gld_mcast[i] != NULL)
4932 			continue;
4933 		/* found an empty slot */
4934 		if (!mcast->gldm_refcnt) {
4935 			/* set mcast in hardware */
4936 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4937 
4938 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4939 			cmac_copy(maddr, cmaddr,
4940 			    macinfo->gldm_addrlen, macinfo);
4941 
4942 			rc = (*macinfo->gldm_set_multicast)
4943 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4944 			if (rc == GLD_NOTSUPPORTED) {
4945 				GLDM_UNLOCK(macinfo);
4946 				return (DL_NOTSUPPORTED);
4947 			} else if (rc == GLD_NORESOURCES) {
4948 				GLDM_UNLOCK(macinfo);
4949 				return (DL_TOOMANY);
4950 			} else if (rc == GLD_BADARG) {
4951 				GLDM_UNLOCK(macinfo);
4952 				return (DL_BADADDR);
4953 			} else if (rc == GLD_RETRY) {
4954 				/*
4955 				 * The putbq and gld_xwait must be
4956 				 * within the lock to prevent races
4957 				 * with gld_sched.
4958 				 */
4959 				(void) putbq(q, mp);
4960 				gld->gld_xwait = B_TRUE;
4961 				GLDM_UNLOCK(macinfo);
4962 				return (GLDE_RETRY);
4963 			} else if (rc != GLD_SUCCESS) {
4964 				GLDM_UNLOCK(macinfo);
4965 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4966 				    DL_SYSERR, EIO);
4967 				return (GLDE_OK);
4968 			}
4969 		}
4970 		gld->gld_mcast[i] = mcast;
4971 		mcast->gldm_refcnt++;
4972 		GLDM_UNLOCK(macinfo);
4973 		dlokack(q, mp, DL_ENABMULTI_REQ);
4974 		return (GLDE_OK);
4975 	}
4976 
4977 	/* couldn't get a stream slot */
4978 	GLDM_UNLOCK(macinfo);
4979 	return (DL_TOOMANY);
4980 }
4981 
4982 
4983 /*
4984  * gld_disable_multi (q, mp)
4985  * Disable the multicast address on the stream.  If last
4986  * reference for the mac layer, disable there as well.
4987  */
4988 static int
4989 gld_disable_multi(queue_t *q, mblk_t *mp)
4990 {
4991 	gld_t  *gld;
4992 	gld_mac_info_t *macinfo;
4993 	unsigned char *maddr;
4994 	dl_disabmulti_req_t *multi;
4995 	int i;
4996 	gld_mcast_t *mcast;
4997 
4998 #ifdef GLD_DEBUG
4999 	if (gld_debug & GLDPROT) {
5000 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
5001 		    (void *)mp);
5002 	}
5003 #endif
5004 
5005 	gld = (gld_t *)q->q_ptr;
5006 	if (gld->gld_state == DL_UNATTACHED)
5007 		return (DL_OUTSTATE);
5008 
5009 	macinfo = gld->gld_mac_info;
5010 	ASSERT(macinfo != NULL);
5011 	if (macinfo->gldm_set_multicast == NULL) {
5012 		return (DL_UNSUPPORTED);
5013 	}
5014 
5015 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
5016 
5017 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
5018 	    multi->dl_addr_length != macinfo->gldm_addrlen)
5019 		return (DL_BADADDR);
5020 
5021 	maddr = mp->b_rptr + multi->dl_addr_offset;
5022 
5023 	/* request appears to be valid */
5024 	/* does this address appear in current table? */
5025 	GLDM_LOCK(macinfo, RW_WRITER);
5026 	if (gld->gld_mcast != NULL) {
5027 		for (i = 0; i < gld->gld_multicnt; i++)
5028 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
5029 			    mac_eq(mcast->gldm_addr,
5030 			    maddr, macinfo->gldm_addrlen)) {
5031 				ASSERT(mcast->gldm_refcnt);
5032 				gld_send_disable_multi(macinfo, mcast);
5033 				gld->gld_mcast[i] = NULL;
5034 				GLDM_UNLOCK(macinfo);
5035 				dlokack(q, mp, DL_DISABMULTI_REQ);
5036 				return (GLDE_OK);
5037 			}
5038 	}
5039 	GLDM_UNLOCK(macinfo);
5040 	return (DL_NOTENAB); /* not an enabled address */
5041 }
5042 
5043 /*
5044  * gld_send_disable_multi(macinfo, mcast)
5045  * this function is used to disable a multicast address if the reference
5046  * count goes to zero. The disable request will then be forwarded to the
5047  * lower stream.
5048  */
5049 static void
5050 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
5051 {
5052 	ASSERT(macinfo != NULL);
5053 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5054 	ASSERT(mcast != NULL);
5055 	ASSERT(mcast->gldm_refcnt);
5056 
5057 	if (!mcast->gldm_refcnt) {
5058 		return;			/* "cannot happen" */
5059 	}
5060 
5061 	if (--mcast->gldm_refcnt > 0) {
5062 		return;
5063 	}
5064 
5065 	/*
5066 	 * This must be converted from canonical form to device form.
5067 	 * The refcnt is now zero so we can trash the data.
5068 	 */
5069 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
5070 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
5071 
5072 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
5073 	(void) (*macinfo->gldm_set_multicast)
5074 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
5075 }
5076 
5077 /*
5078  * gld_promisc (q, mp, req, on)
5079  *	enable or disable the use of promiscuous mode with the hardware
5080  */
5081 static int
5082 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
5083 {
5084 	gld_t *gld;
5085 	gld_mac_info_t *macinfo;
5086 	gld_mac_pvt_t *mac_pvt;
5087 	gld_vlan_t *vlan;
5088 	union DL_primitives *prim;
5089 	int macrc = GLD_SUCCESS;
5090 	int dlerr = GLDE_OK;
5091 	int op = GLD_MAC_PROMISC_NOOP;
5092 
5093 #ifdef GLD_DEBUG
5094 	if (gld_debug & GLDTRACE)
5095 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
5096 		    (void *)q, (void *)mp, req, on);
5097 #endif
5098 
5099 	ASSERT(mp != NULL);
5100 	prim = (union DL_primitives *)mp->b_rptr;
5101 
5102 	/* XXX I think spec allows promisc in unattached state */
5103 	gld = (gld_t *)q->q_ptr;
5104 	if (gld->gld_state == DL_UNATTACHED)
5105 		return (DL_OUTSTATE);
5106 
5107 	macinfo = gld->gld_mac_info;
5108 	ASSERT(macinfo != NULL);
5109 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5110 
5111 	vlan = (gld_vlan_t *)gld->gld_vlan;
5112 	ASSERT(vlan != NULL);
5113 
5114 	GLDM_LOCK(macinfo, RW_WRITER);
5115 
5116 	/*
5117 	 * Work out what request (if any) has to be made to the MAC layer
5118 	 */
5119 	if (on) {
5120 		switch (prim->promiscon_req.dl_level) {
5121 		default:
5122 			dlerr = DL_UNSUPPORTED;	/* this is an error */
5123 			break;
5124 
5125 		case DL_PROMISC_PHYS:
5126 			if (mac_pvt->nprom == 0)
5127 				op = GLD_MAC_PROMISC_PHYS;
5128 			break;
5129 
5130 		case DL_PROMISC_MULTI:
5131 			if (mac_pvt->nprom_multi == 0)
5132 				if (mac_pvt->nprom == 0)
5133 					op = GLD_MAC_PROMISC_MULTI;
5134 			break;
5135 
5136 		case DL_PROMISC_SAP:
5137 			/* We can do this without reference to the MAC */
5138 			break;
5139 		}
5140 	} else {
5141 		switch (prim->promiscoff_req.dl_level) {
5142 		default:
5143 			dlerr = DL_UNSUPPORTED;	/* this is an error */
5144 			break;
5145 
5146 		case DL_PROMISC_PHYS:
5147 			if (!(gld->gld_flags & GLD_PROM_PHYS))
5148 				dlerr = DL_NOTENAB;
5149 			else if (mac_pvt->nprom == 1)
5150 				if (mac_pvt->nprom_multi)
5151 					op = GLD_MAC_PROMISC_MULTI;
5152 				else
5153 					op = GLD_MAC_PROMISC_NONE;
5154 			break;
5155 
5156 		case DL_PROMISC_MULTI:
5157 			if (!(gld->gld_flags & GLD_PROM_MULT))
5158 				dlerr = DL_NOTENAB;
5159 			else if (mac_pvt->nprom_multi == 1)
5160 				if (mac_pvt->nprom == 0)
5161 					op = GLD_MAC_PROMISC_NONE;
5162 			break;
5163 
5164 		case DL_PROMISC_SAP:
5165 			if (!(gld->gld_flags & GLD_PROM_SAP))
5166 				dlerr = DL_NOTENAB;
5167 
5168 			/* We can do this without reference to the MAC */
5169 			break;
5170 		}
5171 	}
5172 
5173 	/*
5174 	 * The request was invalid in some way so no need to continue.
5175 	 */
5176 	if (dlerr != GLDE_OK) {
5177 		GLDM_UNLOCK(macinfo);
5178 		return (dlerr);
5179 	}
5180 
5181 	/*
5182 	 * Issue the request to the MAC layer, if required
5183 	 */
5184 	if (op != GLD_MAC_PROMISC_NOOP) {
5185 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
5186 	}
5187 
5188 	/*
5189 	 * On success, update the appropriate flags & refcounts
5190 	 */
5191 	if (macrc == GLD_SUCCESS) {
5192 		if (on) {
5193 			switch (prim->promiscon_req.dl_level) {
5194 			case DL_PROMISC_PHYS:
5195 				mac_pvt->nprom++;
5196 				vlan->gldv_nprom++;
5197 				gld->gld_flags |= GLD_PROM_PHYS;
5198 				break;
5199 
5200 			case DL_PROMISC_MULTI:
5201 				mac_pvt->nprom_multi++;
5202 				vlan->gldv_nprom++;
5203 				gld->gld_flags |= GLD_PROM_MULT;
5204 				break;
5205 
5206 			case DL_PROMISC_SAP:
5207 				gld->gld_flags |= GLD_PROM_SAP;
5208 				vlan->gldv_nprom++;
5209 				vlan->gldv_nvlan_sap++;
5210 				break;
5211 
5212 			default:
5213 				break;
5214 			}
5215 		} else {
5216 			switch (prim->promiscoff_req.dl_level) {
5217 			case DL_PROMISC_PHYS:
5218 				mac_pvt->nprom--;
5219 				vlan->gldv_nprom--;
5220 				gld->gld_flags &= ~GLD_PROM_PHYS;
5221 				break;
5222 
5223 			case DL_PROMISC_MULTI:
5224 				mac_pvt->nprom_multi--;
5225 				vlan->gldv_nprom--;
5226 				gld->gld_flags &= ~GLD_PROM_MULT;
5227 				break;
5228 
5229 			case DL_PROMISC_SAP:
5230 				gld->gld_flags &= ~GLD_PROM_SAP;
5231 				vlan->gldv_nvlan_sap--;
5232 				vlan->gldv_nprom--;
5233 				break;
5234 
5235 			default:
5236 				break;
5237 			}
5238 		}
5239 	} else if (macrc == GLD_RETRY) {
5240 		/*
5241 		 * The putbq and gld_xwait must be within the lock to
5242 		 * prevent races with gld_sched.
5243 		 */
5244 		(void) putbq(q, mp);
5245 		gld->gld_xwait = B_TRUE;
5246 	}
5247 
5248 	GLDM_UNLOCK(macinfo);
5249 
5250 	/*
5251 	 * Finally, decide how to reply.
5252 	 *
5253 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
5254 	 * layer but failed.  In such cases, we can return a DL_* error
5255 	 * code and let the caller send an error-ack reply upstream, or
5256 	 * we can send a reply here and then return GLDE_OK so that the
5257 	 * caller doesn't also respond.
5258 	 *
5259 	 * If physical-promiscuous mode was (successfully) switched on or
5260 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
5261 	 */
5262 	switch (macrc) {
5263 	case GLD_NOTSUPPORTED:
5264 		return (DL_NOTSUPPORTED);
5265 
5266 	case GLD_NORESOURCES:
5267 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
5268 		return (GLDE_OK);
5269 
5270 	case GLD_RETRY:
5271 		return (GLDE_RETRY);
5272 
5273 	default:
5274 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
5275 		return (GLDE_OK);
5276 
5277 	case GLD_SUCCESS:
5278 		dlokack(q, mp, req);
5279 		break;
5280 	}
5281 
5282 	switch (op) {
5283 	case GLD_MAC_PROMISC_NOOP:
5284 		break;
5285 
5286 	case GLD_MAC_PROMISC_PHYS:
5287 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
5288 		break;
5289 
5290 	default:
5291 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
5292 		break;
5293 	}
5294 
5295 	return (GLDE_OK);
5296 }
5297 
5298 /*
5299  * gld_physaddr()
5300  *	get the current or factory physical address value
5301  */
5302 static int
5303 gld_physaddr(queue_t *q, mblk_t *mp)
5304 {
5305 	gld_t *gld = (gld_t *)q->q_ptr;
5306 	gld_mac_info_t *macinfo;
5307 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5308 	unsigned char addr[GLD_MAX_ADDRLEN];
5309 
5310 	if (gld->gld_state == DL_UNATTACHED)
5311 		return (DL_OUTSTATE);
5312 
5313 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5314 	ASSERT(macinfo != NULL);
5315 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5316 
5317 	switch (prim->physaddr_req.dl_addr_type) {
5318 	case DL_FACT_PHYS_ADDR:
5319 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5320 		    (caddr_t)addr, macinfo->gldm_addrlen);
5321 		break;
5322 	case DL_CURR_PHYS_ADDR:
5323 		/* make a copy so we don't hold the lock across qreply */
5324 		GLDM_LOCK(macinfo, RW_WRITER);
5325 		mac_copy((caddr_t)
5326 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5327 		    (caddr_t)addr, macinfo->gldm_addrlen);
5328 		GLDM_UNLOCK(macinfo);
5329 		break;
5330 	default:
5331 		return (DL_BADPRIM);
5332 	}
5333 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5334 	return (GLDE_OK);
5335 }
5336 
5337 /*
5338  * gld_setaddr()
5339  *	change the hardware's physical address to a user specified value
5340  */
5341 static int
5342 gld_setaddr(queue_t *q, mblk_t *mp)
5343 {
5344 	gld_t *gld = (gld_t *)q->q_ptr;
5345 	gld_mac_info_t *macinfo;
5346 	gld_mac_pvt_t *mac_pvt;
5347 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5348 	unsigned char *addr;
5349 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5350 	int rc;
5351 	gld_vlan_t *vlan;
5352 
5353 	if (gld->gld_state == DL_UNATTACHED)
5354 		return (DL_OUTSTATE);
5355 
5356 	vlan = (gld_vlan_t *)gld->gld_vlan;
5357 	ASSERT(vlan != NULL);
5358 
5359 	if (vlan->gldv_id != VLAN_VID_NONE)
5360 		return (DL_NOTSUPPORTED);
5361 
5362 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5363 	ASSERT(macinfo != NULL);
5364 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5365 
5366 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5367 	    prim->set_physaddr_req.dl_addr_length) ||
5368 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5369 		return (DL_BADADDR);
5370 
5371 	GLDM_LOCK(macinfo, RW_WRITER);
5372 
5373 	/* now do the set at the hardware level */
5374 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5375 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5376 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5377 
5378 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5379 	if (rc == GLD_SUCCESS)
5380 		mac_copy(addr, mac_pvt->curr_macaddr,
5381 		    macinfo->gldm_addrlen);
5382 
5383 	GLDM_UNLOCK(macinfo);
5384 
5385 	switch (rc) {
5386 	case GLD_SUCCESS:
5387 		break;
5388 	case GLD_NOTSUPPORTED:
5389 		return (DL_NOTSUPPORTED);
5390 	case GLD_BADARG:
5391 		return (DL_BADADDR);
5392 	case GLD_NORESOURCES:
5393 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5394 		return (GLDE_OK);
5395 	default:
5396 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5397 		return (GLDE_OK);
5398 	}
5399 
5400 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5401 
5402 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5403 	return (GLDE_OK);
5404 }
5405 
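/*
 * gld_get_statistics() (DL_GET_STATISTICS_REQ)
 *	refresh the cached kstats and reply upstream with a
 *	DL_GET_STATISTICS_ACK carrying a copy of the gldkstats data;
 *	the copy is taken while holding the MAC lock as writer.
 */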
5406 int
5407 gld_get_statistics(queue_t *q, mblk_t *mp)
5408 {
5409 	dl_get_statistics_ack_t *dlsp;
5410 	gld_t  *gld = (gld_t *)q->q_ptr;
5411 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5412 	gld_mac_pvt_t *mac_pvt;
5413 
5414 	if (gld->gld_state == DL_UNATTACHED)
5415 		return (DL_OUTSTATE);
5416 
5417 	ASSERT(macinfo != NULL);
5418 
5419 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5420 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5421 
5422 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5423 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5424 
5425 	if (mp == NULL)
5426 		return (GLDE_OK);	/* mexchange already sent merror */
5427 
5428 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5429 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5430 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5431 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5432 
5433 	GLDM_LOCK(macinfo, RW_WRITER);
5434 	bcopy(mac_pvt->kstatp->ks_data,
5435 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5436 	    sizeof (struct gldkstats));
5437 	GLDM_UNLOCK(macinfo);
5438 
5439 	qreply(q, mp);
5440 	return (GLDE_OK);
5441 }
5442 
5443 /* =================================================== */
5444 /* misc utilities, some requiring various mutexes held */
5445 /* =================================================== */
5446 
5447 /*
5448  * Reset the device, re-program its MAC address, and start it.
5449  */
5450 static int
5451 gld_start_mac(gld_mac_info_t *macinfo)
5452 {
5453 	int	rc;
5454 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5455 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5456 
5457 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5458 	ASSERT(!mac_pvt->started);
5459 
5460 	rc = (*macinfo->gldm_reset)(macinfo);
5461 	if (rc != GLD_SUCCESS)
5462 		return (GLD_FAILURE);
5463 
5464 	/* set the addr after we reset the device */
5465 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5466 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5467 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5468 
5469 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5470 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5471 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5472 		return (GLD_FAILURE);
5473 
5474 	rc = (*macinfo->gldm_start)(macinfo);
5475 	if (rc != GLD_SUCCESS)
5476 		return (GLD_FAILURE);
5477 
5478 	mac_pvt->started = B_TRUE;
5479 	return (GLD_SUCCESS);
5480 }
5481 
5482 /*
5483  * Stop the device.
5484  */
5485 static void
5486 gld_stop_mac(gld_mac_info_t *macinfo)
5487 {
5488 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5489 
5490 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5491 	ASSERT(mac_pvt->started);
5492 
5493 	(void) (*macinfo->gldm_stop)(macinfo);
5494 
5495 	mac_pvt->started = B_FALSE;
5496 }
5497 
5498 
5499 /*
5500  * gld_set_ipq will set a pointer to the queue which is bound to the
5501  * IP SAP if:
5502  * o the device type is ethernet or IPoIB.
5503  * o there is no stream in SAP promiscuous mode.
5504  * o there is exactly one stream bound to the IP SAP.
5505  * o the stream is in "fastpath" mode.
5506  */
5507 static void
5508 gld_set_ipq(gld_t *gld)
5509 {
5510 	gld_vlan_t	*vlan;
5511 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5512 	gld_t		*ip_gld = NULL;
5513 	uint_t		ipq_candidates = 0;
5514 	gld_t		*ipv6_gld = NULL;
5515 	uint_t		ipv6q_candidates = 0;
5516 
5517 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5518 
5519 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5520 	if (((macinfo->gldm_type != DL_ETHER) &&
5521 	    (macinfo->gldm_type != DL_IB)) ||
5522 	    (gld_global_options & GLD_OPT_NO_IPQ))
5523 		return;
5524 
5525 	vlan = (gld_vlan_t *)gld->gld_vlan;
5526 	ASSERT(vlan != NULL);
5527 
5528 	/* clear down any previously defined ipqs */
5529 	vlan->gldv_ipq = NULL;
5530 	vlan->gldv_ipv6q = NULL;
5531 
5532 	/* Try to find a single stream eligible to receive IP packets */
5533 	for (gld = vlan->gldv_str_next;
5534 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5535 		if (gld->gld_state != DL_IDLE)
5536 			continue;	/* not eligible to receive */
5537 		if (gld->gld_flags & GLD_STR_CLOSING)
5538 			continue;	/* not eligible to receive */
5539 
5540 		if (gld->gld_sap == ETHERTYPE_IP) {
5541 			ip_gld = gld;
5542 			ipq_candidates++;
5543 		}
5544 
5545 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5546 			ipv6_gld = gld;
5547 			ipv6q_candidates++;
5548 		}
5549 	}
5550 
5551 	if (ipq_candidates == 1) {
5552 		ASSERT(ip_gld != NULL);
5553 
5554 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5555 			vlan->gldv_ipq = ip_gld->gld_qptr;
5556 	}
5557 
5558 	if (ipv6q_candidates == 1) {
5559 		ASSERT(ipv6_gld != NULL);
5560 
5561 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5562 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5563 	}
5564 }
5565 
5566 /*
5567  * gld_flushqueue (q)
5568  *	used by DLPI primitives that require flushing the queues.
5569  *	essentially, this is DL_UNBIND_REQ.
5570  */
5571 static void
5572 gld_flushqueue(queue_t *q)
5573 {
5574 	/* flush all data in both queues */
5575 	/* XXX Should these be FLUSHALL? */
5576 	flushq(q, FLUSHDATA);
5577 	flushq(WR(q), FLUSHDATA);
5578 	/* flush all the queues upstream */
5579 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5580 }
5581 
5582 /*
5583  * gld_devlookup (major)
5584  * search the device table for the device with the specified
5585  * major number and return a pointer to it if it exists.
5586  */
5587 static glddev_t *
5588 gld_devlookup(int major)
5589 {
5590 	struct glddevice *dev;
5591 
5592 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5593 
5594 	for (dev = gld_device_list.gld_next;
5595 	    dev != &gld_device_list;
5596 	    dev = dev->gld_next) {
5597 		ASSERT(dev);
5598 		if (dev->gld_major == major)
5599 			return (dev);
5600 	}
5601 	return (NULL);
5602 }
5603 
5604 /*
5605  * gld_findminor(device)
5606  * Returns a minor number currently unused by any stream in the current
5607  * device class (major) list.
5608  */
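/*
 * Note: minors are normally handed out sequentially from
 * device->gld_nextminor ("the fast way" below); once that range is
 * exhausted, the code falls back to a linear scan of every unattached
 * stream and of every stream on every VLAN of every ready MAC, and
 * returns 0 if no minor is free.
 */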
5609 static int
5610 gld_findminor(glddev_t *device)
5611 {
5612 	gld_t		*next;
5613 	gld_mac_info_t	*nextmac;
5614 	gld_vlan_t	*nextvlan;
5615 	int		minor;
5616 	int		i;
5617 
5618 	ASSERT(mutex_owned(&device->gld_devlock));
5619 
5620 	/* The fast way */
5621 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5622 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5623 		return (device->gld_nextminor++);
5624 
5625 	/* The steady way */
5626 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5627 	    minor++) {
5628 		/* Search all unattached streams */
5629 		for (next = device->gld_str_next;
5630 		    next != (gld_t *)&device->gld_str_next;
5631 		    next = next->gld_next) {
5632 			if (minor == next->gld_minor)
5633 				goto nextminor;
5634 		}
5635 		/* Search all attached streams; we don't need maclock because */
5636 		/* mac stream list is protected by devlock as well as maclock */
5637 		for (nextmac = device->gld_mac_next;
5638 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5639 		    nextmac = nextmac->gldm_next) {
5640 			gld_mac_pvt_t *pvt =
5641 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5642 
5643 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5644 				continue;	/* this one's not ready yet */
5645 
5646 			for (i = 0; i < VLAN_HASHSZ; i++) {
5647 				for (nextvlan = pvt->vlan_hash[i];
5648 				    nextvlan != NULL;
5649 				    nextvlan = nextvlan->gldv_next) {
5650 					for (next = nextvlan->gldv_str_next;
5651 					    next !=
5652 					    (gld_t *)&nextvlan->gldv_str_next;
5653 					    next = next->gld_next) {
5654 						if (minor == next->gld_minor)
5655 							goto nextminor;
5656 					}
5657 				}
5658 			}
5659 		}
5660 
5661 		return (minor);
5662 nextminor:
5663 		/* don't need to do anything */
5664 		;
5665 	}
5666 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5667 	    device->gld_name);
5668 	return (0);
5669 }
5670 
5671 /*
5672  * versions of insque/remque for use by this driver
5673  */
5674 struct qelem {
5675 	struct qelem *q_forw;
5676 	struct qelem *q_back;
5677 	/* rest of structure */
5678 };
5679 
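/*
 * Both routines operate on circular doubly-linked lists whose head is a
 * sentinel that points at itself when the list is empty (see, for
 * example, gld_add_vlan() initializing gldv_str_next/gldv_str_prev to
 * point back at &gldv_str_next).  gldinsque() links elem immediately
 * after pred; gldremque() unlinks an element and clears its pointers.
 */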
5680 static void
5681 gldinsque(void *elem, void *pred)
5682 {
5683 	struct qelem *pelem = elem;
5684 	struct qelem *ppred = pred;
5685 	struct qelem *pnext = ppred->q_forw;
5686 
5687 	pelem->q_forw = pnext;
5688 	pelem->q_back = ppred;
5689 	ppred->q_forw = pelem;
5690 	pnext->q_back = pelem;
5691 }
5692 
5693 static void
5694 gldremque(void *arg)
5695 {
5696 	struct qelem *pelem = arg;
5697 	struct qelem *elem = arg;
5698 
5699 	pelem->q_forw->q_back = pelem->q_back;
5700 	pelem->q_back->q_forw = pelem->q_forw;
5701 	elem->q_back = elem->q_forw = NULL;
5702 }
5703 
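/*
 * gld_add_vlan()
 *	allocate a gld_vlan_t for the given VLAN id and link it into the
 *	per-MAC hash.  The untagged pseudo-VLAN (VLAN_VID_NONE) shares the
 *	MAC's statistics block; a tagged VLAN gets its own statistics and
 *	kstats via gld_init_vlan_stats().  Returns NULL if allocation or
 *	kstat setup fails.  The hash is searched and updated without
 *	locking here, so the caller is assumed to hold the MAC write lock.
 */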
5704 static gld_vlan_t *
5705 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5706 {
5707 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5708 	gld_vlan_t	**pp;
5709 	gld_vlan_t	*p;
5710 
5711 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5712 	while ((p = *pp) != NULL) {
5713 		ASSERT(p->gldv_id != vid);
5714 		pp = &(p->gldv_next);
5715 	}
5716 
5717 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5718 		return (NULL);
5719 
5720 	p->gldv_mac = macinfo;
5721 	p->gldv_id = vid;
5722 
5723 	if (vid == VLAN_VID_NONE) {
5724 		p->gldv_ptag = VLAN_VTAG_NONE;
5725 		p->gldv_stats = mac_pvt->statistics;
5726 		p->gldv_kstatp = NULL;
5727 	} else {
5728 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5729 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5730 		    KM_SLEEP);
5731 
5732 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5733 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5734 			kmem_free(p, sizeof (gld_vlan_t));
5735 			return (NULL);
5736 		}
5737 	}
5738 
5739 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5740 	mac_pvt->nvlan++;
5741 	*pp = p;
5742 
5743 	return (p);
5744 }
5745 
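/*
 * gld_rem_vlan()
 *	unlink the VLAN from the per-MAC hash and free it; a tagged VLAN
 *	also has its private kstats and statistics freed here.
 */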
5746 static void
5747 gld_rem_vlan(gld_vlan_t *vlan)
5748 {
5749 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5750 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5751 	gld_vlan_t	**pp;
5752 	gld_vlan_t	*p;
5753 
5754 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5755 	while ((p = *pp) != NULL) {
5756 		if (p->gldv_id == vlan->gldv_id)
5757 			break;
5758 		pp = &(p->gldv_next);
5759 	}
5760 	ASSERT(p != NULL);
5761 
5762 	*pp = p->gldv_next;
5763 	mac_pvt->nvlan--;
5764 	if (p->gldv_id != VLAN_VID_NONE) {
5765 		ASSERT(p->gldv_kstatp != NULL);
5766 		kstat_delete(p->gldv_kstatp);
5767 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5768 	}
5769 	kmem_free(p, sizeof (gld_vlan_t));
5770 }
5771 
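/*
 * gld_find_vlan()
 *	look up the gld_vlan_t for the given VLAN id on this MAC;
 *	returns NULL if no such VLAN exists.
 */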
5772 gld_vlan_t *
5773 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5774 {
5775 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5776 	gld_vlan_t	*p;
5777 
5778 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5779 	while (p != NULL) {
5780 		if (p->gldv_id == vid)
5781 			return (p);
5782 		p = p->gldv_next;
5783 	}
5784 	return (NULL);
5785 }
5786 
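/*
 * gld_get_vlan()
 *	find-or-create wrapper: return the existing gld_vlan_t for the
 *	given id, or try to add one if it does not exist yet.
 */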
5787 gld_vlan_t *
5788 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5789 {
5790 	gld_vlan_t	*vlan;
5791 
5792 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5793 		vlan = gld_add_vlan(macinfo, vid);
5794 
5795 	return (vlan);
5796 }
5797 
5798 /*
5799  * gld_bitrevcopy()
5800  * This is essentially bcopy, with the ability to bit reverse the
5801  * the source bytes. The MAC addresses bytes as transmitted by FDDI
5802  * interfaces are bit reversed.
5803  */
5804 void
5805 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5806 {
5807 	while (n--)
5808 		*target++ = bit_rev[(uchar_t)*src++];
5809 }
5810 
5811 /*
5812  * gld_bitreverse()
5813  * Convert the bit order by swapping all the bits, using a
5814  * lookup table.
5815  */
5816 void
5817 gld_bitreverse(uchar_t *rptr, size_t n)
5818 {
5819 	while (n--) {
5820 		*rptr = bit_rev[*rptr];
5821 		rptr++;
5822 	}
5823 }
5824 
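/*
 * gld_macaddr_sprintf()
 *	format len bytes of a MAC address as colon-separated lowercase hex
 *	into etherbuf and return etherbuf; the caller must supply a buffer
 *	of at least (3 * len) bytes.  A hypothetical caller (assuming a
 *	6-byte Ethernet address in "mac"):
 *
 *		char buf[3 * ETHERADDRL];
 *		cmn_err(CE_CONT, "addr %s\n",
 *		    gld_macaddr_sprintf(buf, mac, ETHERADDRL));
 */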
5825 char *
5826 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5827 {
5828 	int i;
5829 	char *cp = etherbuf;
5830 	static char digits[] = "0123456789abcdef";
5831 
5832 	for (i = 0; i < len; i++) {
5833 		*cp++ = digits[*ap >> 4];
5834 		*cp++ = digits[*ap++ & 0xf];
5835 		*cp++ = ':';
5836 	}
5837 	*--cp = 0;
5838 	return (etherbuf);
5839 }
5840 
5841 #ifdef GLD_DEBUG
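/*
 * gld_check_assertions()
 *	debug-only consistency walk of the global device list: verifies
 *	the expected invariants on every unattached stream, every ready
 *	MAC, and every stream on every VLAN, taking the per-device and
 *	per-MAC locks as it goes.
 */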
5842 static void
5843 gld_check_assertions()
5844 {
5845 	glddev_t	*dev;
5846 	gld_mac_info_t	*mac;
5847 	gld_t		*str;
5848 	gld_vlan_t	*vlan;
5849 	int		i;
5850 
5851 	mutex_enter(&gld_device_list.gld_devlock);
5852 
5853 	for (dev = gld_device_list.gld_next;
5854 	    dev != (glddev_t *)&gld_device_list.gld_next;
5855 	    dev = dev->gld_next) {
5856 		mutex_enter(&dev->gld_devlock);
5857 		ASSERT(dev->gld_broadcast != NULL);
5858 		for (str = dev->gld_str_next;
5859 		    str != (gld_t *)&dev->gld_str_next;
5860 		    str = str->gld_next) {
5861 			ASSERT(str->gld_device == dev);
5862 			ASSERT(str->gld_mac_info == NULL);
5863 			ASSERT(str->gld_qptr != NULL);
5864 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5865 			ASSERT(str->gld_multicnt == 0);
5866 			ASSERT(str->gld_mcast == NULL);
5867 			ASSERT(!(str->gld_flags &
5868 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5869 			ASSERT(str->gld_sap == 0);
5870 			ASSERT(str->gld_state == DL_UNATTACHED);
5871 		}
5872 		for (mac = dev->gld_mac_next;
5873 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5874 		    mac = mac->gldm_next) {
5875 			int nvlan = 0;
5876 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5877 
5878 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5879 				continue;	/* this one's not ready yet */
5880 
5881 			GLDM_LOCK(mac, RW_WRITER);
5882 			ASSERT(mac->gldm_devinfo != NULL);
5883 			ASSERT(mac->gldm_mac_pvt != NULL);
5884 			ASSERT(pvt->interfacep != NULL);
5885 			ASSERT(pvt->kstatp != NULL);
5886 			ASSERT(pvt->statistics != NULL);
5887 			ASSERT(pvt->major_dev == dev);
5888 
5889 			for (i = 0; i < VLAN_HASHSZ; i++) {
5890 				for (vlan = pvt->vlan_hash[i];
5891 				    vlan != NULL; vlan = vlan->gldv_next) {
5892 					int nstr = 0;
5893 
5894 					ASSERT(vlan->gldv_mac == mac);
5895 
5896 					for (str = vlan->gldv_str_next;
5897 					    str !=
5898 					    (gld_t *)&vlan->gldv_str_next;
5899 					    str = str->gld_next) {
5900 						ASSERT(str->gld_device == dev);
5901 						ASSERT(str->gld_mac_info ==
5902 						    mac);
5903 						ASSERT(str->gld_qptr != NULL);
5904 						ASSERT(str->gld_minor >=
5905 						    GLD_MIN_CLONE_MINOR);
5906 						ASSERT(
5907 						    str->gld_multicnt == 0 ||
5908 						    str->gld_mcast);
5909 						nstr++;
5910 					}
5911 					ASSERT(vlan->gldv_nstreams == nstr);
5912 					nvlan++;
5913 				}
5914 			}
5915 			ASSERT(pvt->nvlan == nvlan);
5916 			GLDM_UNLOCK(mac);
5917 		}
5918 		mutex_exit(&dev->gld_devlock);
5919 	}
5920 	mutex_exit(&gld_device_list.gld_devlock);
5921 }
5922 #endif
5923