xref: /titanic_50/usr/src/uts/common/io/gld.c (revision 0a1278f26ea4b7c8c0285d4f2d6c5b680904aa01)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * gld - Generic LAN Driver Version 2, PSARC/1997/382
28  *
29  * This is a utility module that provides generic facilities for
30  * LAN	drivers.  The DLPI protocol and most STREAMS interfaces
31  * are handled here.
32  *
33  * It no longer provides compatibility with drivers
34  * implemented according to the GLD v0 documentation published
35  * in 1993. (See PSARC 2003/728)
36  */
37 
38 
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/stropts.h>
42 #include <sys/stream.h>
43 #include <sys/kmem.h>
44 #include <sys/stat.h>
45 #include <sys/modctl.h>
46 #include <sys/kstat.h>
47 #include <sys/debug.h>
48 #include <sys/note.h>
49 #include <sys/sysmacros.h>
50 
51 #include <sys/byteorder.h>
52 #include <sys/strsun.h>
53 #include <sys/strsubr.h>
54 #include <sys/dlpi.h>
55 #include <sys/pattr.h>
56 #include <sys/ethernet.h>
57 #include <sys/ib/clients/ibd/ibd.h>
58 #include <sys/policy.h>
59 #include <sys/atomic.h>
60 
61 #include <sys/multidata.h>
62 #include <sys/gld.h>
63 #include <sys/gldpriv.h>
64 
65 #include <sys/ddi.h>
66 #include <sys/sunddi.h>
67 
68 /*
69  * Macros to increment statistics.
70  */
71 
72 /*
73  * Increase kstats. Note this operation is not atomic. It can be used when
74  * GLDM_LOCK_HELD_WRITE(macinfo).
75  */
76 #define	BUMP(stats, vstats, stat, delta)	do {			\
77 	((stats)->stat) += (delta);					\
78 	_NOTE(CONSTANTCONDITION)					\
79 	if ((vstats) != NULL)						\
80 		((struct gld_stats *)(vstats))->stat += (delta);	\
81 	_NOTE(CONSTANTCONDITION)					\
82 } while (0)
83 
84 #define	ATOMIC_BUMP_STAT(stat, delta)	do {			\
85 	_NOTE(CONSTANTCONDITION)				\
86 	if (sizeof ((stat)) == sizeof (uint32_t)) {		\
87 		atomic_add_32((uint32_t *)&(stat), (delta));	\
88 	_NOTE(CONSTANTCONDITION)				\
89 	} else if (sizeof ((stat)) == sizeof (uint64_t)) {	\
90 		atomic_add_64((uint64_t *)&(stat), (delta));	\
91 	}							\
92 	_NOTE(CONSTANTCONDITION)				\
93 } while (0)
94 
95 #define	ATOMIC_BUMP(stats, vstats, stat, delta)	do {			\
96 	ATOMIC_BUMP_STAT((stats)->stat, (delta));			\
97 	_NOTE(CONSTANTCONDITION)					\
98 	if ((vstats) != NULL) {						\
99 		ATOMIC_BUMP_STAT(((struct gld_stats *)(vstats))->stat,	\
100 		    (delta));						\
101 	}								\
102 	_NOTE(CONSTANTCONDITION)					\
103 } while (0)
104 
105 #define	UPDATE_STATS(stats, vstats, pktinfo, delta) {			\
106 	if ((pktinfo).isBroadcast) {					\
107 		ATOMIC_BUMP((stats), (vstats),				\
108 		    glds_brdcstxmt, (delta));				\
109 	} else if ((pktinfo).isMulticast) {				\
110 		ATOMIC_BUMP((stats), (vstats), glds_multixmt, (delta));	\
111 	}								\
112 	ATOMIC_BUMP((stats), (vstats), glds_bytexmt64,			\
113 	    ((pktinfo).pktLen));					\
114 	ATOMIC_BUMP((stats), (vstats), glds_pktxmt64, (delta));		\
115 }
116 
117 #ifdef GLD_DEBUG
118 int gld_debug = GLDERRS;
119 #endif
120 
121 /* called from gld_register */
122 static int gld_initstats(gld_mac_info_t *);
123 
124 /* called from kstat mechanism, and from wsrv's get_statistics */
125 static int gld_update_kstat(kstat_t *, int);
126 
127 /* statistics for additional vlans */
128 static int gld_init_vlan_stats(gld_vlan_t *);
129 static int gld_update_vlan_kstat(kstat_t *, int);
130 
131 /* called from gld_getinfo */
132 static dev_info_t *gld_finddevinfo(dev_t);
133 
134 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
135 /* also from the source routing stuff for sending RDE protocol packets */
136 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
137 static int gld_start_mdt(queue_t *, mblk_t *, int);
138 
139 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
140 static void gld_precv(gld_mac_info_t *, mblk_t *, uint32_t, struct gld_stats *);
141 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
142     pdesc_t *, pktinfo_t *);
143 
144 /* receive group: called from gld_recv and gld_precv* with maclock held */
145 static void gld_sendup(gld_mac_info_t *, pktinfo_t *, mblk_t *,
146     int (*)());
147 static int gld_accept(gld_t *, pktinfo_t *);
148 static int gld_mcmatch(gld_t *, pktinfo_t *);
149 static int gld_multicast(unsigned char *, gld_t *);
150 static int gld_paccept(gld_t *, pktinfo_t *);
151 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
152     void (*)(queue_t *, mblk_t *));
153 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *, boolean_t);
154 
155 /* wsrv group: called from wsrv, single threaded per queue */
156 static int gld_ioctl(queue_t *, mblk_t *);
157 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
158 static int gld_cmds(queue_t *, mblk_t *);
159 static mblk_t *gld_bindack(queue_t *, mblk_t *);
160 static int gld_notify_req(queue_t *, mblk_t *);
161 static int gld_udqos(queue_t *, mblk_t *);
162 static int gld_bind(queue_t *, mblk_t *);
163 static int gld_unbind(queue_t *, mblk_t *);
164 static int gld_inforeq(queue_t *, mblk_t *);
165 static int gld_unitdata(queue_t *, mblk_t *);
166 static int gldattach(queue_t *, mblk_t *);
167 static int gldunattach(queue_t *, mblk_t *);
168 static int gld_enable_multi(queue_t *, mblk_t *);
169 static int gld_disable_multi(queue_t *, mblk_t *);
170 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
171 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
172 static int gld_physaddr(queue_t *, mblk_t *);
173 static int gld_setaddr(queue_t *, mblk_t *);
174 static int gld_get_statistics(queue_t *, mblk_t *);
175 static int gld_cap(queue_t *, mblk_t *);
176 static int gld_cap_ack(queue_t *, mblk_t *);
177 static int gld_cap_enable(queue_t *, mblk_t *);
178 
179 /* misc utilities, some requiring various mutexes held */
180 static int gld_start_mac(gld_mac_info_t *);
181 static void gld_stop_mac(gld_mac_info_t *);
182 static void gld_set_ipq(gld_t *);
183 static void gld_flushqueue(queue_t *);
184 static glddev_t *gld_devlookup(int);
185 static int gld_findminor(glddev_t *);
186 static void gldinsque(void *, void *);
187 static void gldremque(void *);
188 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
189 void gld_bitreverse(uchar_t *, size_t);
190 char *gld_macaddr_sprintf(char *, unsigned char *, int);
191 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
192 static void gld_rem_vlan(gld_vlan_t *);
193 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
194 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
195 
196 #ifdef GLD_DEBUG
197 static void gld_check_assertions(void);
198 extern void gld_sr_dump(gld_mac_info_t *);
199 #endif
200 
201 /*
202  * Allocate and zero-out "number" structures each of type "structure" in
203  * kernel memory.
204  */
205 #define	GLD_GETSTRUCT(structure, number)   \
206 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
207 
208 #define	abs(a) ((a) < 0 ? -(a) : a)
209 
210 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
211 
212 /*
213  * The device is of DL_ETHER type and is able to support VLAN by itself.
214  */
215 #define	VLAN_CAPABLE(macinfo) \
216 	((macinfo)->gldm_type == DL_ETHER && \
217 	(macinfo)->gldm_send_tagged != NULL)
218 
219 /*
220  * The set of notifications generatable by GLD itself, the additional
221  * set that can be generated if the MAC driver provide the link-state
222  * tracking callback capability, and the set supported by the GLD
223  * notification code below.
224  *
225  * PLEASE keep these in sync with what the code actually does!
226  */
227 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
228 						DL_NOTE_PROMISC_OFF_PHYS |
229 						DL_NOTE_PHYS_ADDR;
230 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
231 						DL_NOTE_LINK_UP |
232 						DL_NOTE_SPEED;
233 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
234 						DL_NOTE_PROMISC_OFF_PHYS |
235 						DL_NOTE_PHYS_ADDR |
236 						DL_NOTE_LINK_DOWN |
237 						DL_NOTE_LINK_UP |
238 						DL_NOTE_SPEED;
239 
240 /* Media must correspond to #defines in gld.h */
241 static char *gld_media[] = {
242 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
243 	"aui",		/* GLDM_AUI */
244 	"bnc",		/* GLDM_BNC */
245 	"twpair",	/* GLDM_TP */
246 	"fiber",	/* GLDM_FIBER */
247 	"100baseT",	/* GLDM_100BT */
248 	"100vgAnyLan",	/* GLDM_VGANYLAN */
249 	"10baseT",	/* GLDM_10BT */
250 	"ring4",	/* GLDM_RING4 */
251 	"ring16",	/* GLDM_RING16 */
252 	"PHY/MII",	/* GLDM_PHYMII */
253 	"100baseTX",	/* GLDM_100BTX */
254 	"100baseT4",	/* GLDM_100BT4 */
255 	"unknown",	/* skip */
256 	"ipib",		/* GLDM_IB */
257 };
258 
259 /* Must correspond to #defines in gld.h */
260 static char *gld_duplex[] = {
261 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
262 	"half",		/* GLD_DUPLEX_HALF */
263 	"full"		/* GLD_DUPLEX_FULL */
264 };
265 
266 /*
267  * Interface types currently supported by GLD.
268  * If you add new types, you must check all "XXX" strings in the GLD source
269  * for implementation issues that may affect the support of your new type.
270  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
271  * require generalizing this GLD source to handle the new cases.  In other
272  * words there are assumptions built into the code in a few places that must
273  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
274  */
275 static gld_interface_t interfaces[] = {
276 
277 	/* Ethernet Bus */
278 	{
279 		DL_ETHER,
280 		(uint_t)-1,
281 		sizeof (struct ether_header),
282 		gld_interpret_ether,
283 		NULL,
284 		gld_fastpath_ether,
285 		gld_unitdata_ether,
286 		gld_init_ether,
287 		gld_uninit_ether,
288 		"ether"
289 	},
290 
291 	/* Fiber Distributed data interface */
292 	{
293 		DL_FDDI,
294 		4352,
295 		sizeof (struct fddi_mac_frm),
296 		gld_interpret_fddi,
297 		NULL,
298 		gld_fastpath_fddi,
299 		gld_unitdata_fddi,
300 		gld_init_fddi,
301 		gld_uninit_fddi,
302 		"fddi"
303 	},
304 
305 	/* Token Ring interface */
306 	{
307 		DL_TPR,
308 		17914,
309 		-1,			/* variable header size */
310 		gld_interpret_tr,
311 		NULL,
312 		gld_fastpath_tr,
313 		gld_unitdata_tr,
314 		gld_init_tr,
315 		gld_uninit_tr,
316 		"tpr"
317 	},
318 
319 	/* Infiniband */
320 	{
321 		DL_IB,
322 		4092,
323 		sizeof (struct ipoib_header),
324 		gld_interpret_ib,
325 		gld_interpret_mdt_ib,
326 		gld_fastpath_ib,
327 		gld_unitdata_ib,
328 		gld_init_ib,
329 		gld_uninit_ib,
330 		"ipib"
331 	},
332 };
333 
334 /*
335  * bit reversal lookup table.
336  */
337 static	uchar_t bit_rev[] = {
338 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
339 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
340 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
341 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
342 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
343 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
344 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
345 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
346 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
347 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
348 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
349 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
350 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
351 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
352 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
353 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
354 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
355 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
356 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
357 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
358 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
359 	0x3f, 0xbf, 0x7f, 0xff,
360 };
361 
362 /*
363  * User priorities, mapped from b_band.
364  */
365 static uint32_t user_priority[] = {
366 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
367 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
368 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
369 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
370 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
371 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
372 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
373 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
374 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
375 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
376 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
377 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
378 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
379 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
380 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
381 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
382 };
383 
384 #define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
385 
386 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
387 
388 /*
389  * Module linkage information for the kernel.
390  */
391 
392 static struct modldrv modlmisc = {
393 	&mod_miscops,		/* Type of module - a utility provider */
394 	"Generic LAN Driver (" GLD_VERSION_STRING ")"
395 #ifdef GLD_DEBUG
396 	" DEBUG"
397 #endif
398 };
399 
400 static struct modlinkage modlinkage = {
401 	MODREV_1, &modlmisc, NULL
402 };
403 
404 int
405 _init(void)
406 {
407 	int e;
408 
409 	/* initialize gld_device_list mutex */
410 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
411 
412 	/* initialize device driver (per-major) list */
413 	gld_device_list.gld_next =
414 	    gld_device_list.gld_prev = &gld_device_list;
415 
416 	if ((e = mod_install(&modlinkage)) != 0)
417 		mutex_destroy(&gld_device_list.gld_devlock);
418 
419 	return (e);
420 }
421 
422 int
423 _fini(void)
424 {
425 	int e;
426 
427 	if ((e = mod_remove(&modlinkage)) != 0)
428 		return (e);
429 
430 	ASSERT(gld_device_list.gld_next ==
431 	    (glddev_t *)&gld_device_list.gld_next);
432 	ASSERT(gld_device_list.gld_prev ==
433 	    (glddev_t *)&gld_device_list.gld_next);
434 	mutex_destroy(&gld_device_list.gld_devlock);
435 
436 	return (e);
437 }
438 
439 int
440 _info(struct modinfo *modinfop)
441 {
442 	return (mod_info(&modlinkage, modinfop));
443 }
444 
445 /*
446  * GLD service routines
447  */
448 
449 /* So this gld binary maybe can be forward compatible with future v2 drivers */
450 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
451 
452 /*ARGSUSED*/
453 gld_mac_info_t *
454 gld_mac_alloc(dev_info_t *devinfo)
455 {
456 	gld_mac_info_t *macinfo;
457 
458 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
459 	    KM_SLEEP);
460 
461 	/*
462 	 * The setting of gldm_driver_version will not be documented or allowed
463 	 * until a future release.
464 	 */
465 	macinfo->gldm_driver_version = GLD_VERSION_200;
466 
467 	/*
468 	 * GLD's version.  This also is undocumented for now, but will be
469 	 * available if needed in the future.
470 	 */
471 	macinfo->gldm_GLD_version = GLD_VERSION;
472 
473 	return (macinfo);
474 }
475 
476 /*
477  * gld_mac_free must be called after the driver has removed interrupts
478  * and completely stopped calling gld_recv() and gld_sched().  At that
479  * point the interrupt routine is guaranteed by the system to have been
480  * exited and the maclock is no longer needed.  Of course, it is
481  * expected (required) that (assuming gld_register() succeeded),
482  * gld_unregister() was called before gld_mac_free().
483  */
484 void
485 gld_mac_free(gld_mac_info_t *macinfo)
486 {
487 	ASSERT(macinfo);
488 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
489 
490 	/*
491 	 * Assert that if we made it through gld_register, then we must
492 	 * have unregistered.
493 	 */
494 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
495 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
496 
497 	GLDM_LOCK_DESTROY(macinfo);
498 
499 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
500 }
501 
502 /*
503  * gld_register -- called once per device instance (PPA)
504  *
505  * During its attach routine, a real device driver will register with GLD
506  * so that later opens and dl_attach_reqs will work.  The arguments are the
507  * devinfo pointer, the device name, and a macinfo structure describing the
508  * physical device instance.
509  */
510 int
511 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
512 {
513 	int mediatype;
514 	int major = ddi_name_to_major(devname), i;
515 	glddev_t *glddev;
516 	gld_mac_pvt_t *mac_pvt;
517 	char minordev[32];
518 	char pbuf[3*GLD_MAX_ADDRLEN];
519 	gld_interface_t *ifp;
520 
521 	ASSERT(devinfo != NULL);
522 	ASSERT(macinfo != NULL);
523 
524 	if (macinfo->gldm_driver_version != GLD_VERSION)
525 		return (DDI_FAILURE);
526 
527 	mediatype = macinfo->gldm_type;
528 
529 	/*
530 	 * Entry points should be ready for us.
531 	 * ioctl is optional.
532 	 * set_multicast and get_stats are optional in v0.
533 	 * intr is only required if you add an interrupt.
534 	 */
535 	ASSERT(macinfo->gldm_reset != NULL);
536 	ASSERT(macinfo->gldm_start != NULL);
537 	ASSERT(macinfo->gldm_stop != NULL);
538 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
539 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
540 	ASSERT(macinfo->gldm_send != NULL);
541 
542 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
543 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
544 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
545 	ASSERT(macinfo->gldm_vendor_addr != NULL);
546 	ASSERT(macinfo->gldm_ident != NULL);
547 
548 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
549 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
550 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
551 		return (DDI_FAILURE);
552 	}
553 
554 	/*
555 	 * GLD only functions properly with saplen == -2
556 	 */
557 	if (macinfo->gldm_saplen != -2) {
558 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
559 		    "not supported", devname, macinfo->gldm_saplen);
560 		return (DDI_FAILURE);
561 	}
562 
563 	/* see gld_rsrv() */
564 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
565 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
566 
567 	mutex_enter(&gld_device_list.gld_devlock);
568 	glddev = gld_devlookup(major);
569 
570 	/*
571 	 *  Allocate per-driver (major) data structure if necessary
572 	 */
573 	if (glddev == NULL) {
574 		/* first occurrence of this device name (major number) */
575 		glddev = GLD_GETSTRUCT(glddev_t, 1);
576 		if (glddev == NULL) {
577 			mutex_exit(&gld_device_list.gld_devlock);
578 			return (DDI_FAILURE);
579 		}
580 		(void) strncpy(glddev->gld_name, devname,
581 		    sizeof (glddev->gld_name) - 1);
582 		glddev->gld_major = major;
583 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
584 		glddev->gld_mac_next = glddev->gld_mac_prev =
585 		    (gld_mac_info_t *)&glddev->gld_mac_next;
586 		glddev->gld_str_next = glddev->gld_str_prev =
587 		    (gld_t *)&glddev->gld_str_next;
588 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
589 
590 		/* allow increase of number of supported multicast addrs */
591 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
592 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
593 
594 		/*
595 		 * Optionally restrict DLPI provider style
596 		 *
597 		 * -1 - don't create style 1 nodes
598 		 * -2 - don't create style 2 nodes
599 		 */
600 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
601 		    "gld-provider-styles", 0);
602 
603 		/* Stuff that's needed before any PPA gets attached */
604 		glddev->gld_type = macinfo->gldm_type;
605 		glddev->gld_minsdu = macinfo->gldm_minpkt;
606 		glddev->gld_saplen = macinfo->gldm_saplen;
607 		glddev->gld_addrlen = macinfo->gldm_addrlen;
608 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
609 		    KM_SLEEP);
610 		bcopy(macinfo->gldm_broadcast_addr,
611 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
612 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
613 		gldinsque(glddev, gld_device_list.gld_prev);
614 	}
615 	glddev->gld_ndevice++;
616 	/* Now glddev can't go away until we unregister this mac (or fail) */
617 	mutex_exit(&gld_device_list.gld_devlock);
618 
619 	/*
620 	 *  Per-instance initialization
621 	 */
622 
623 	/*
624 	 * Initialize per-mac structure that is private to GLD.
625 	 * Set up interface pointer. These are device class specific pointers
626 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
627 	 */
628 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
629 		if (mediatype != interfaces[i].mac_type)
630 			continue;
631 
632 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
633 		    KM_SLEEP);
634 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
635 		    &interfaces[i];
636 		break;
637 	}
638 
639 	if (ifp == NULL) {
640 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
641 		    "of type %d", devname, mediatype);
642 		goto failure;
643 	}
644 
645 	/*
646 	 * Driver can only register MTU within legal media range.
647 	 */
648 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
649 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
650 		    devname);
651 		goto failure;
652 	}
653 
654 	/*
655 	 * Correct margin size if it is not set.
656 	 */
657 	if (VLAN_CAPABLE(macinfo) && (macinfo->gldm_margin == 0))
658 		macinfo->gldm_margin = VTAG_SIZE;
659 
660 	/*
661 	 * For now, only Infiniband drivers can use MDT. Do not add
662 	 * support for Ethernet, FDDI or TR.
663 	 */
664 	if (macinfo->gldm_mdt_pre != NULL) {
665 		if (mediatype != DL_IB) {
666 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
667 			    "driver of type %d", devname, mediatype);
668 			goto failure;
669 		}
670 
671 		/*
672 		 * Validate entry points.
673 		 */
674 		if ((macinfo->gldm_mdt_send == NULL) ||
675 		    (macinfo->gldm_mdt_post == NULL)) {
676 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
677 			    "%s driver of type %d", devname, mediatype);
678 			goto failure;
679 		}
680 		macinfo->gldm_options |= GLDOPT_MDT;
681 	}
682 
683 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
684 	mac_pvt->major_dev = glddev;
685 
686 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
687 	/*
688 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
689 	 * format or in wire format?  Also gldm_broadcast.  For now
690 	 * we are assuming canonical, but I'm not sure that makes the
691 	 * most sense for ease of driver implementation.
692 	 */
693 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
694 	    macinfo->gldm_addrlen);
695 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
696 
697 	/*
698 	 * The available set of notifications is those generatable by GLD
699 	 * itself, plus those corresponding to the capabilities of the MAC
700 	 * driver, intersected with those supported by gld_notify_ind() above.
701 	 */
702 	mac_pvt->notifications = gld_internal_notes;
703 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
704 		mac_pvt->notifications |= gld_linkstate_notes;
705 	mac_pvt->notifications &= gld_supported_notes;
706 
707 	GLDM_LOCK_INIT(macinfo);
708 
709 	ddi_set_driver_private(devinfo, macinfo);
710 
711 	/*
712 	 * Now atomically get a PPA and put ourselves on the mac list.
713 	 */
714 	mutex_enter(&glddev->gld_devlock);
715 
716 #ifdef DEBUG
717 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
718 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
719 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
720 		    macinfo->gldm_ppa);
721 #endif
722 
723 	/*
724 	 * Create style 2 node (gated by gld-provider-styles property).
725 	 *
726 	 * NOTE: When the CLONE_DEV flag is specified to
727 	 *	 ddi_create_minor_node() the minor number argument is
728 	 *	 immaterial. Opens of that node will go via the clone
729 	 *	 driver and gld_open() will always be passed a dev_t with
730 	 *	 minor of zero.
731 	 */
732 	if (glddev->gld_styles != -2) {
733 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
734 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
735 			mutex_exit(&glddev->gld_devlock);
736 			goto late_failure;
737 		}
738 	}
739 
740 	/*
741 	 * Create style 1 node (gated by gld-provider-styles property)
742 	 */
743 	if (glddev->gld_styles != -1) {
744 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
745 		    macinfo->gldm_ppa);
746 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
747 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
748 		    0) != DDI_SUCCESS) {
749 			mutex_exit(&glddev->gld_devlock);
750 			goto late_failure;
751 		}
752 	}
753 
754 	/* add ourselves to this major device's linked list of instances */
755 	gldinsque(macinfo, glddev->gld_mac_prev);
756 
757 	mutex_exit(&glddev->gld_devlock);
758 
759 	/*
760 	 * Unfortunately we need the ppa before we call gld_initstats();
761 	 * otherwise we would like to do this just above the mutex_enter
762 	 * above.  In which case we could have set MAC_READY inside the
763 	 * mutex and we wouldn't have needed to check it in open and
764 	 * DL_ATTACH.  We wouldn't like to do the initstats/kstat_create
765 	 * inside the mutex because it might get taken in our kstat_update
766 	 * routine and cause a deadlock with kstat_chain_lock.
767 	 */
768 
769 	/* gld_initstats() calls (*ifp->init)() */
770 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
771 		mutex_enter(&glddev->gld_devlock);
772 		gldremque(macinfo);
773 		mutex_exit(&glddev->gld_devlock);
774 		goto late_failure;
775 	}
776 
777 	/*
778 	 * Need to indicate we are NOW ready to process interrupts;
779 	 * any interrupt before this is set is for someone else.
780 	 * This flag is also now used to tell open, et. al. that this
781 	 * mac is now fully ready and available for use.
782 	 */
783 	GLDM_LOCK(macinfo, RW_WRITER);
784 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
785 	GLDM_UNLOCK(macinfo);
786 
787 	/* log local ethernet address -- XXX not DDI compliant */
788 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
789 		(void) localetheraddr(
790 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
791 
792 	/* now put announcement into the message buffer */
793 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
794 	    glddev->gld_name,
795 	    macinfo->gldm_ppa, macinfo->gldm_ident,
796 	    mac_pvt->interfacep->mac_string,
797 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
798 	    macinfo->gldm_addrlen));
799 
800 	ddi_report_dev(devinfo);
801 	return (DDI_SUCCESS);
802 
803 late_failure:
804 	ddi_remove_minor_node(devinfo, NULL);
805 	GLDM_LOCK_DESTROY(macinfo);
806 	if (mac_pvt->curr_macaddr != NULL)
807 		kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
808 	if (mac_pvt->statistics != NULL)
809 		kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
810 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
811 	macinfo->gldm_mac_pvt = NULL;
812 
813 failure:
814 	mutex_enter(&gld_device_list.gld_devlock);
815 	glddev->gld_ndevice--;
816 	/*
817 	 * Note that just because this goes to zero here does not necessarily
818 	 * mean that we were the one who added the glddev above.  It's
819 	 * possible that the first mac unattached while were were in here
820 	 * failing to attach the second mac.  But we're now the last.
821 	 */
822 	if (glddev->gld_ndevice == 0) {
823 		/* There should be no macinfos left */
824 		ASSERT(glddev->gld_mac_next ==
825 		    (gld_mac_info_t *)&glddev->gld_mac_next);
826 		ASSERT(glddev->gld_mac_prev ==
827 		    (gld_mac_info_t *)&glddev->gld_mac_next);
828 
829 		/*
830 		 * There should be no DL_UNATTACHED streams: the system
831 		 * should not have detached the "first" devinfo which has
832 		 * all the open style 2 streams.
833 		 *
834 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
835 		 */
836 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
837 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
838 
839 		gldremque(glddev);
840 		mutex_destroy(&glddev->gld_devlock);
841 		if (glddev->gld_broadcast != NULL)
842 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
843 		kmem_free(glddev, sizeof (glddev_t));
844 	}
845 	mutex_exit(&gld_device_list.gld_devlock);
846 
847 	return (DDI_FAILURE);
848 }
849 
850 /*
851  * gld_unregister (macinfo)
852  * remove the macinfo structure from local structures
853  * this is cleanup for a driver to be unloaded
854  */
855 int
856 gld_unregister(gld_mac_info_t *macinfo)
857 {
858 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
859 	glddev_t *glddev = mac_pvt->major_dev;
860 	gld_interface_t *ifp;
861 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
862 
863 	mutex_enter(&glddev->gld_devlock);
864 	GLDM_LOCK(macinfo, RW_WRITER);
865 
866 	if (mac_pvt->nvlan > 0) {
867 		GLDM_UNLOCK(macinfo);
868 		mutex_exit(&glddev->gld_devlock);
869 		return (DDI_FAILURE);
870 	}
871 
872 #ifdef	GLD_DEBUG
873 	{
874 		int i;
875 
876 		for (i = 0; i < VLAN_HASHSZ; i++) {
877 			if ((mac_pvt->vlan_hash[i] != NULL))
878 				cmn_err(CE_PANIC,
879 				    "%s, line %d: "
880 				    "mac_pvt->vlan_hash[%d] != NULL",
881 				    __FILE__, __LINE__, i);
882 		}
883 	}
884 #endif
885 
886 	/* Delete this mac */
887 	gldremque(macinfo);
888 
889 	/* Disallow further entries to gld_recv() and gld_sched() */
890 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
891 
892 	GLDM_UNLOCK(macinfo);
893 	mutex_exit(&glddev->gld_devlock);
894 
895 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
896 	(*ifp->uninit)(macinfo);
897 
898 	ASSERT(mac_pvt->kstatp);
899 	kstat_delete(mac_pvt->kstatp);
900 
901 	ASSERT(GLDM_LOCK_INITED(macinfo));
902 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
903 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
904 
905 	if (mac_pvt->mcast_table != NULL)
906 		kmem_free(mac_pvt->mcast_table, multisize);
907 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
908 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
909 
910 	/* We now have one fewer instance for this major device */
911 	mutex_enter(&gld_device_list.gld_devlock);
912 	glddev->gld_ndevice--;
913 	if (glddev->gld_ndevice == 0) {
914 		/* There should be no macinfos left */
915 		ASSERT(glddev->gld_mac_next ==
916 		    (gld_mac_info_t *)&glddev->gld_mac_next);
917 		ASSERT(glddev->gld_mac_prev ==
918 		    (gld_mac_info_t *)&glddev->gld_mac_next);
919 
920 		/*
921 		 * There should be no DL_UNATTACHED streams: the system
922 		 * should not have detached the "first" devinfo which has
923 		 * all the open style 2 streams.
924 		 *
925 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
926 		 */
927 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
928 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
929 
930 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
931 		gldremque(glddev);
932 		mutex_destroy(&glddev->gld_devlock);
933 		if (glddev->gld_broadcast != NULL)
934 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
935 		kmem_free(glddev, sizeof (glddev_t));
936 	}
937 	mutex_exit(&gld_device_list.gld_devlock);
938 
939 	return (DDI_SUCCESS);
940 }
941 
942 /*
943  * gld_initstats
944  * called from gld_register
945  */
946 static int
947 gld_initstats(gld_mac_info_t *macinfo)
948 {
949 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
950 	struct gldkstats *sp;
951 	glddev_t *glddev;
952 	kstat_t *ksp;
953 	gld_interface_t *ifp;
954 
955 	glddev = mac_pvt->major_dev;
956 
957 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
958 	    NULL, "net", KSTAT_TYPE_NAMED,
959 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
960 		cmn_err(CE_WARN,
961 		    "GLD: failed to create kstat structure for %s%d",
962 		    glddev->gld_name, macinfo->gldm_ppa);
963 		return (GLD_FAILURE);
964 	}
965 	mac_pvt->kstatp = ksp;
966 
967 	ksp->ks_update = gld_update_kstat;
968 	ksp->ks_private = (void *)macinfo;
969 
970 	sp = ksp->ks_data;
971 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
972 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
973 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
974 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
975 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
976 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
977 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
978 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
979 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
984 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
985 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
986 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
987 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
988 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
990 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
991 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
992 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
993 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
994 
995 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
996 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
997 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
998 
999 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1000 	    KSTAT_DATA_UINT32);
1001 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1002 	    KSTAT_DATA_UINT32);
1003 
1004 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1005 
1006 	(*ifp->init)(macinfo);
1007 
1008 	kstat_install(ksp);
1009 
1010 	return (GLD_SUCCESS);
1011 }
1012 
1013 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1014 static int
1015 gld_update_kstat(kstat_t *ksp, int rw)
1016 {
1017 	gld_mac_info_t	*macinfo;
1018 	gld_mac_pvt_t	*mac_pvt;
1019 	struct gldkstats *gsp;
1020 	struct gld_stats *stats;
1021 
1022 	if (rw == KSTAT_WRITE)
1023 		return (EACCES);
1024 
1025 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1026 	ASSERT(macinfo != NULL);
1027 
1028 	GLDM_LOCK(macinfo, RW_WRITER);
1029 
1030 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1031 		GLDM_UNLOCK(macinfo);
1032 		return (EIO);	/* this one's not ready yet */
1033 	}
1034 
1035 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1036 		GLDM_UNLOCK(macinfo);
1037 		return (EIO);	/* this one's not ready any more */
1038 	}
1039 
1040 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1041 	gsp = mac_pvt->kstatp->ks_data;
1042 	ASSERT(gsp);
1043 	stats = mac_pvt->statistics;
1044 
1045 	if (macinfo->gldm_get_stats)
1046 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1047 
1048 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1049 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1050 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1051 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1052 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1053 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1054 
1055 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1056 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1057 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1058 
1059 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1060 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1061 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1062 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1063 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1064 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1065 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1066 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1067 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1068 	gsp->glds_missed.value.ul = stats->glds_missed;
1069 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1070 	    stats->glds_gldnorcvbuf;
1071 	gsp->glds_intr.value.ul = stats->glds_intr;
1072 
1073 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1074 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1075 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1076 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1077 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1078 
1079 	if (mac_pvt->nprom)
1080 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1081 	else if (mac_pvt->nprom_multi)
1082 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1083 	else
1084 		(void) strcpy(gsp->glds_prom.value.c, "off");
1085 
1086 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1087 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1088 	    ? stats->glds_media : 0]);
1089 
1090 	switch (macinfo->gldm_type) {
1091 	case DL_ETHER:
1092 		gsp->glds_frame.value.ul = stats->glds_frame;
1093 		gsp->glds_crc.value.ul = stats->glds_crc;
1094 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1095 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1096 		gsp->glds_defer.value.ul = stats->glds_defer;
1097 		gsp->glds_short.value.ul = stats->glds_short;
1098 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1099 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1100 		gsp->glds_dot3_first_coll.value.ui32 =
1101 		    stats->glds_dot3_first_coll;
1102 		gsp->glds_dot3_multi_coll.value.ui32 =
1103 		    stats->glds_dot3_multi_coll;
1104 		gsp->glds_dot3_sqe_error.value.ui32 =
1105 		    stats->glds_dot3_sqe_error;
1106 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1107 		    stats->glds_dot3_mac_xmt_error;
1108 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1109 		    stats->glds_dot3_mac_rcv_error;
1110 		gsp->glds_dot3_frame_too_long.value.ui32 =
1111 		    stats->glds_dot3_frame_too_long;
1112 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1113 		    stats->glds_duplex <
1114 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1115 		    stats->glds_duplex : 0]);
1116 		break;
1117 	case DL_TPR:
1118 		gsp->glds_dot5_line_error.value.ui32 =
1119 		    stats->glds_dot5_line_error;
1120 		gsp->glds_dot5_burst_error.value.ui32 =
1121 		    stats->glds_dot5_burst_error;
1122 		gsp->glds_dot5_signal_loss.value.ui32 =
1123 		    stats->glds_dot5_signal_loss;
1124 		gsp->glds_dot5_ace_error.value.ui32 =
1125 		    stats->glds_dot5_ace_error;
1126 		gsp->glds_dot5_internal_error.value.ui32 =
1127 		    stats->glds_dot5_internal_error;
1128 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1129 		    stats->glds_dot5_lost_frame_error;
1130 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1131 		    stats->glds_dot5_frame_copied_error;
1132 		gsp->glds_dot5_token_error.value.ui32 =
1133 		    stats->glds_dot5_token_error;
1134 		gsp->glds_dot5_freq_error.value.ui32 =
1135 		    stats->glds_dot5_freq_error;
1136 		break;
1137 	case DL_FDDI:
1138 		gsp->glds_fddi_mac_error.value.ui32 =
1139 		    stats->glds_fddi_mac_error;
1140 		gsp->glds_fddi_mac_lost.value.ui32 =
1141 		    stats->glds_fddi_mac_lost;
1142 		gsp->glds_fddi_mac_token.value.ui32 =
1143 		    stats->glds_fddi_mac_token;
1144 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1145 		    stats->glds_fddi_mac_tvx_expired;
1146 		gsp->glds_fddi_mac_late.value.ui32 =
1147 		    stats->glds_fddi_mac_late;
1148 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1149 		    stats->glds_fddi_mac_ring_op;
1150 		break;
1151 	case DL_IB:
1152 		break;
1153 	default:
1154 		break;
1155 	}
1156 
1157 	GLDM_UNLOCK(macinfo);
1158 
1159 #ifdef GLD_DEBUG
1160 	gld_check_assertions();
1161 	if (gld_debug & GLDRDE)
1162 		gld_sr_dump(macinfo);
1163 #endif
1164 
1165 	return (0);
1166 }
1167 
1168 static int
1169 gld_init_vlan_stats(gld_vlan_t *vlan)
1170 {
1171 	gld_mac_info_t *mac = vlan->gldv_mac;
1172 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1173 	struct gldkstats *sp;
1174 	glddev_t *glddev;
1175 	kstat_t *ksp;
1176 	char *name;
1177 	int instance;
1178 
1179 	glddev = mac_pvt->major_dev;
1180 	name = glddev->gld_name;
1181 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1182 
1183 	if ((ksp = kstat_create(name, instance,
1184 	    NULL, "net", KSTAT_TYPE_NAMED,
1185 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1186 		cmn_err(CE_WARN,
1187 		    "GLD: failed to create kstat structure for %s%d",
1188 		    name, instance);
1189 		return (GLD_FAILURE);
1190 	}
1191 
1192 	vlan->gldv_kstatp = ksp;
1193 
1194 	ksp->ks_update = gld_update_vlan_kstat;
1195 	ksp->ks_private = (void *)vlan;
1196 
1197 	sp = ksp->ks_data;
1198 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1199 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1200 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1201 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1202 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1203 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1204 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1205 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1206 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1211 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1212 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1213 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1214 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1215 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1217 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1218 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1219 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1220 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1221 
1222 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1223 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1224 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1225 
1226 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1227 	    KSTAT_DATA_UINT32);
1228 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1229 	    KSTAT_DATA_UINT32);
1230 
1231 	kstat_install(ksp);
1232 	return (GLD_SUCCESS);
1233 }
1234 
1235 static int
1236 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1237 {
1238 	gld_vlan_t	*vlan;
1239 	gld_mac_info_t	*macinfo;
1240 	struct gldkstats *gsp;
1241 	struct gld_stats *stats;
1242 	gld_mac_pvt_t *mac_pvt;
1243 	uint32_t media;
1244 
1245 	if (rw == KSTAT_WRITE)
1246 		return (EACCES);
1247 
1248 	vlan = (gld_vlan_t *)ksp->ks_private;
1249 	ASSERT(vlan != NULL);
1250 
1251 	macinfo = vlan->gldv_mac;
1252 	GLDM_LOCK(macinfo, RW_WRITER);
1253 
1254 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1255 
1256 	gsp = vlan->gldv_kstatp->ks_data;
1257 	ASSERT(gsp);
1258 	stats = vlan->gldv_stats;
1259 
1260 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1261 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1262 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1263 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1264 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1265 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1266 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1267 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1268 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1269 
1270 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1271 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1272 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1273 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1274 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1275 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1276 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1277 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1278 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1279 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1280 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1281 
1282 	gsp->glds_speed.value.ui64 = mac_pvt->statistics->glds_speed;
1283 	media = mac_pvt->statistics->glds_media;
1284 	(void) strcpy(gsp->glds_media.value.c,
1285 	    gld_media[media < sizeof (gld_media) / sizeof (gld_media[0]) ?
1286 	    media : 0]);
1287 
1288 	GLDM_UNLOCK(macinfo);
1289 	return (0);
1290 }
1291 
1292 /*
1293  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1294  */
1295 /*ARGSUSED*/
1296 int
1297 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1298 {
1299 	dev_info_t	*devinfo;
1300 	minor_t		minor = getminor((dev_t)arg);
1301 	int		rc = DDI_FAILURE;
1302 
1303 	switch (cmd) {
1304 	case DDI_INFO_DEVT2DEVINFO:
1305 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1306 			*(dev_info_t **)resultp = devinfo;
1307 			rc = DDI_SUCCESS;
1308 		}
1309 		break;
1310 	case DDI_INFO_DEVT2INSTANCE:
1311 		/* Need static mapping for deferred attach */
1312 		if (minor == GLD_USE_STYLE2) {
1313 			/*
1314 			 * Style 2:  this minor number does not correspond to
1315 			 * any particular instance number.
1316 			 */
1317 			rc = DDI_FAILURE;
1318 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1319 			/* Style 1:  calculate the PPA from the minor */
1320 			*resultp = (void *)(uintptr_t)
1321 			    GLD_STYLE1_MINOR_TO_PPA(minor);
1322 			rc = DDI_SUCCESS;
1323 		} else {
1324 			/* Clone:  look for it.  Not a static mapping */
1325 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1326 				*resultp = (void *)(uintptr_t)
1327 				    ddi_get_instance(devinfo);
1328 				rc = DDI_SUCCESS;
1329 			}
1330 		}
1331 		break;
1332 	}
1333 
1334 	return (rc);
1335 }
1336 
1337 /* called from gld_getinfo */
1338 dev_info_t *
1339 gld_finddevinfo(dev_t dev)
1340 {
1341 	minor_t		minor = getminor(dev);
1342 	glddev_t	*device;
1343 	gld_mac_info_t	*mac;
1344 	gld_vlan_t	*vlan;
1345 	gld_t		*str;
1346 	dev_info_t	*devinfo = NULL;
1347 	int		i;
1348 
1349 	if (minor == GLD_USE_STYLE2) {
1350 		/*
1351 		 * Style 2:  this minor number does not correspond to
1352 		 * any particular instance number.
1353 		 *
1354 		 * XXX We don't know what to say.  See Bug 1165519.
1355 		 */
1356 		return (NULL);
1357 	}
1358 
1359 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1360 
1361 	device = gld_devlookup(getmajor(dev));
1362 	if (device == NULL) {
1363 		/* There are no attached instances of this device */
1364 		mutex_exit(&gld_device_list.gld_devlock);
1365 		return (NULL);
1366 	}
1367 
1368 	/*
1369 	 * Search all attached macs and streams.
1370 	 *
1371 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1372 	 * we don't know what devinfo we should report back even if we
1373 	 * found the minor.  Maybe we should associate streams that are
1374 	 * not currently attached to a PPA with the "first" devinfo node
1375 	 * of the major device to attach -- the one that created the
1376 	 * minor node for the generic device.
1377 	 */
1378 	mutex_enter(&device->gld_devlock);
1379 
1380 	for (mac = device->gld_mac_next;
1381 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1382 	    mac = mac->gldm_next) {
1383 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1384 
1385 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1386 			continue;	/* this one's not ready yet */
1387 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1388 			/* Style 1 -- look for the corresponding PPA */
1389 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1390 				devinfo = mac->gldm_devinfo;
1391 				goto out;	/* found it! */
1392 			} else
1393 				continue;	/* not this PPA */
1394 		}
1395 
1396 		/* We are looking for a clone */
1397 		for (i = 0; i < VLAN_HASHSZ; i++) {
1398 			for (vlan = pvt->vlan_hash[i];
1399 			    vlan != NULL; vlan = vlan->gldv_next) {
1400 				for (str = vlan->gldv_str_next;
1401 				    str != (gld_t *)&vlan->gldv_str_next;
1402 				    str = str->gld_next) {
1403 					ASSERT(str->gld_mac_info == mac);
1404 					if (minor == str->gld_minor) {
1405 						devinfo = mac->gldm_devinfo;
1406 						goto out;
1407 					}
1408 				}
1409 			}
1410 		}
1411 	}
1412 out:
1413 	mutex_exit(&device->gld_devlock);
1414 	mutex_exit(&gld_device_list.gld_devlock);
1415 	return (devinfo);
1416 }
1417 
1418 /*
1419  * STREAMS open routine.  The device dependent driver specifies this as its
1420  * open entry point.
1421  */
1422 /*ARGSUSED2*/
1423 int
1424 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1425 {
1426 	gld_mac_pvt_t *mac_pvt;
1427 	gld_t *gld;
1428 	glddev_t *glddev;
1429 	gld_mac_info_t *macinfo;
1430 	minor_t minor = getminor(*dev);
1431 	gld_vlan_t *vlan;
1432 	t_uscalar_t ppa;
1433 
1434 	ASSERT(q != NULL);
1435 
1436 	if (minor > GLD_MAX_STYLE1_MINOR)
1437 		return (ENXIO);
1438 
1439 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1440 
1441 	/* Find our per-major glddev_t structure */
1442 	mutex_enter(&gld_device_list.gld_devlock);
1443 	glddev = gld_devlookup(getmajor(*dev));
1444 
1445 	/*
1446 	 * This glddev will hang around since detach (and therefore
1447 	 * gld_unregister) can't run while we're here in the open routine.
1448 	 */
1449 	mutex_exit(&gld_device_list.gld_devlock);
1450 
1451 	if (glddev == NULL)
1452 		return (ENXIO);
1453 
1454 #ifdef GLD_DEBUG
1455 	if (gld_debug & GLDPROT) {
1456 		if (minor == GLD_USE_STYLE2)
1457 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1458 		else
1459 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1460 			    (void *)q, minor);
1461 	}
1462 #endif
1463 
1464 	/*
1465 	 * get a per-stream structure and link things together so we
1466 	 * can easily find them later.
1467 	 */
1468 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1469 
1470 	/*
1471 	 * fill in the structure and state info
1472 	 */
1473 	gld->gld_qptr = q;
1474 	gld->gld_device = glddev;
1475 	gld->gld_state = DL_UNATTACHED;
1476 
1477 	/*
1478 	 * we must atomically find a free minor number and add the stream
1479 	 * to a list, because gld_findminor has to traverse the lists to
1480 	 * determine which minor numbers are free.
1481 	 */
1482 	mutex_enter(&glddev->gld_devlock);
1483 
1484 	/* find a free minor device number for the clone */
1485 	gld->gld_minor = gld_findminor(glddev);
1486 	if (gld->gld_minor == 0) {
1487 		mutex_exit(&glddev->gld_devlock);
1488 		kmem_free(gld, sizeof (gld_t));
1489 		return (ENOSR);
1490 	}
1491 
1492 #ifdef GLD_VERBOSE_DEBUG
1493 	if (gld_debug & GLDPROT)
1494 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1495 		    (void *)gld, gld->gld_minor);
1496 #endif
1497 
1498 	if (minor == GLD_USE_STYLE2) {
1499 		gld->gld_style = DL_STYLE2;
1500 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1501 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1502 		gldinsque(gld, glddev->gld_str_prev);
1503 #ifdef GLD_VERBOSE_DEBUG
1504 		if (gld_debug & GLDPROT)
1505 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1506 #endif
1507 		(void) qassociate(q, -1);
1508 		goto done;
1509 	}
1510 
1511 	gld->gld_style = DL_STYLE1;
1512 
1513 	/* the PPA is actually 1 less than the minordev */
1514 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1515 
1516 	for (macinfo = glddev->gld_mac_next;
1517 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1518 	    macinfo = macinfo->gldm_next) {
1519 		ASSERT(macinfo != NULL);
1520 		if (macinfo->gldm_ppa != ppa)
1521 			continue;
1522 
1523 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1524 			continue;	/* this one's not ready yet */
1525 
1526 		/*
1527 		 * we found the correct PPA
1528 		 */
1529 		GLDM_LOCK(macinfo, RW_WRITER);
1530 
1531 		gld->gld_mac_info = macinfo;
1532 
1533 		if (macinfo->gldm_send_tagged != NULL)
1534 			gld->gld_send = macinfo->gldm_send_tagged;
1535 		else
1536 			gld->gld_send = macinfo->gldm_send;
1537 
1538 		/* now ready for action */
1539 		gld->gld_state = DL_UNBOUND;
1540 
1541 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1542 			GLDM_UNLOCK(macinfo);
1543 			mutex_exit(&glddev->gld_devlock);
1544 			kmem_free(gld, sizeof (gld_t));
1545 			return (EIO);
1546 		}
1547 
1548 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1549 		if (!mac_pvt->started) {
1550 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1551 				gld_rem_vlan(vlan);
1552 				GLDM_UNLOCK(macinfo);
1553 				mutex_exit(&glddev->gld_devlock);
1554 				kmem_free(gld, sizeof (gld_t));
1555 				return (EIO);
1556 			}
1557 		}
1558 
1559 		gld->gld_vlan = vlan;
1560 		vlan->gldv_nstreams++;
1561 		gldinsque(gld, vlan->gldv_str_prev);
1562 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1563 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1564 
1565 		GLDM_UNLOCK(macinfo);
1566 #ifdef GLD_VERBOSE_DEBUG
1567 		if (gld_debug & GLDPROT)
1568 			cmn_err(CE_NOTE,
1569 			    "GLDstruct added to instance list");
1570 #endif
1571 		break;
1572 	}
1573 
1574 	if (gld->gld_state == DL_UNATTACHED) {
1575 		mutex_exit(&glddev->gld_devlock);
1576 		kmem_free(gld, sizeof (gld_t));
1577 		return (ENXIO);
1578 	}
1579 
1580 done:
1581 	mutex_exit(&glddev->gld_devlock);
1582 	noenable(WR(q));	/* We'll do the qenables manually */
1583 	qprocson(q);		/* start the queues running */
1584 	qenable(WR(q));
1585 	return (0);
1586 }
1587 
1588 /*
1589  * normal stream close call checks current status and cleans up
1590  * data structures that were dynamically allocated
1591  */
1592 /*ARGSUSED1*/
1593 int
1594 gld_close(queue_t *q, int flag, cred_t *cred)
1595 {
1596 	gld_t	*gld = (gld_t *)q->q_ptr;
1597 	glddev_t *glddev = gld->gld_device;
1598 
1599 	ASSERT(q);
1600 	ASSERT(gld);
1601 
1602 #ifdef GLD_DEBUG
1603 	if (gld_debug & GLDPROT) {
1604 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1605 		    (void *)q, (gld->gld_style & 0x1) + 1);
1606 	}
1607 #endif
1608 
1609 	/* Hold all device streams lists still while we check for a macinfo */
1610 	mutex_enter(&glddev->gld_devlock);
1611 
1612 	if (gld->gld_mac_info != NULL) {
1613 		/* If there's a macinfo, block recv while we change state */
1614 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1615 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1616 		GLDM_UNLOCK(gld->gld_mac_info);
1617 	} else {
1618 		/* no mac DL_ATTACHED right now */
1619 		gld->gld_flags |= GLD_STR_CLOSING;
1620 	}
1621 
1622 	mutex_exit(&glddev->gld_devlock);
1623 
1624 	/*
1625 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1626 	 * we know wsrv isn't in there trying to undo what we're doing.
1627 	 */
1628 	qprocsoff(q);
1629 
1630 	ASSERT(gld->gld_wput_count == 0);
1631 	gld->gld_wput_count = 0;	/* just in case */
1632 
1633 	if (gld->gld_state == DL_IDLE) {
1634 		/* Need to unbind */
1635 		ASSERT(gld->gld_mac_info != NULL);
1636 		(void) gld_unbind(WR(q), NULL);
1637 	}
1638 
1639 	if (gld->gld_state == DL_UNBOUND) {
1640 		/*
1641 		 * Need to unattach
1642 		 * For style 2 stream, gldunattach also
1643 		 * associate queue with NULL dip
1644 		 */
1645 		ASSERT(gld->gld_mac_info != NULL);
1646 		(void) gldunattach(WR(q), NULL);
1647 	}
1648 
1649 	/* disassociate the stream from the device */
1650 	q->q_ptr = WR(q)->q_ptr = NULL;
1651 
1652 	/*
1653 	 * Since we unattached above (if necessary), we know that we're
1654 	 * on the per-major list of unattached streams, rather than a
1655 	 * per-PPA list.  So we know we should hold the devlock.
1656 	 */
1657 	mutex_enter(&glddev->gld_devlock);
1658 	gldremque(gld);			/* remove from Style 2 list */
1659 	mutex_exit(&glddev->gld_devlock);
1660 
1661 	kmem_free(gld, sizeof (gld_t));
1662 
1663 	return (0);
1664 }
1665 
1666 /*
1667  * gld_rsrv (q)
1668  *	simple read service procedure
1669  *	purpose is to avoid the time it takes for packets
1670  *	to move through IP so we can get them off the board
1671  *	as fast as possible due to limited PC resources.
1672  *
1673  *	This is not normally used in the current implementation.  It
1674  *	can be selected with the undocumented property "fast_recv".
1675  *	If that property is set, gld_recv will send the packet
1676  *	upstream with a putq() rather than a putnext(), thus causing
1677  *	this routine to be scheduled.
1678  */
1679 int
1680 gld_rsrv(queue_t *q)
1681 {
1682 	mblk_t *mp;
1683 
1684 	while ((mp = getq(q)) != NULL) {
1685 		if (canputnext(q)) {
1686 			putnext(q, mp);
1687 		} else {
1688 			freemsg(mp);
1689 		}
1690 	}
1691 	return (0);
1692 }
1693 
1694 /*
1695  * gld_wput (q, mp)
1696  * general gld stream write put routine. Receives fastpath data from upper
1697  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1698  * queued for later processing by the service procedure.
1699  */
1700 
1701 int
1702 gld_wput(queue_t *q, mblk_t *mp)
1703 {
1704 	gld_t  *gld = (gld_t *)(q->q_ptr);
1705 	int	rc;
1706 	boolean_t multidata = B_TRUE;
1707 	uint32_t upri;
1708 
1709 #ifdef GLD_DEBUG
1710 	if (gld_debug & GLDTRACE)
1711 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1712 		    (void *)q, (void *)mp, DB_TYPE(mp));
1713 #endif
1714 	switch (DB_TYPE(mp)) {
1715 
1716 	case M_DATA:
1717 		/* fast data / raw support */
1718 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1719 		/* Tricky to access memory without taking the mutex */
1720 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1721 		    gld->gld_state != DL_IDLE) {
1722 			merror(q, mp, EPROTO);
1723 			break;
1724 		}
1725 		/*
1726 		 * Cleanup MBLK_VTAG in case it is set by other
1727 		 * modules. MBLK_VTAG is used to save the vtag information.
1728 		 */
1729 		GLD_CLEAR_MBLK_VTAG(mp);
1730 		multidata = B_FALSE;
1731 		/* LINTED: E_CASE_FALLTHRU */
1732 	case M_MULTIDATA:
1733 		/* Only call gld_start() directly if nothing queued ahead */
1734 		/* No guarantees about ordering with different threads */
1735 		if (q->q_first)
1736 			goto use_wsrv;
1737 
1738 		/*
1739 		 * This can happen if wsrv has taken off the last mblk but
1740 		 * is still processing it.
1741 		 */
1742 		membar_consumer();
1743 		if (gld->gld_in_wsrv)
1744 			goto use_wsrv;
1745 
1746 		/*
1747 		 * Keep a count of current wput calls to start.
1748 		 * Nonzero count delays any attempted DL_UNBIND.
1749 		 * See comments above gld_start().
1750 		 */
1751 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1752 		membar_enter();
1753 
1754 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1755 		/* If this Q is in process of DL_UNBIND, don't call start */
1756 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1757 			/* Extremely unlikely */
1758 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1759 			goto use_wsrv;
1760 		}
1761 
1762 		/*
1763 		 * Get the priority value. Note that in raw mode, the
1764 		 * per-packet priority value kept in b_band is ignored.
1765 		 */
1766 		upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1767 		    UPRI(gld, mp->b_band);
1768 
1769 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1770 		    gld_start(q, mp, GLD_WPUT, upri);
1771 
1772 		/* Allow DL_UNBIND again */
1773 		membar_exit();
1774 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1775 
1776 		if (rc == GLD_NORESOURCES)
1777 			qenable(q);
1778 		break;	/*  Done with this packet */
1779 
1780 use_wsrv:
1781 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1782 		(void) putq(q, mp);
1783 		qenable(q);
1784 		break;
1785 
1786 	case M_IOCTL:
1787 		/* ioctl relies on wsrv single threading per queue */
1788 		(void) putq(q, mp);
1789 		qenable(q);
1790 		break;
1791 
1792 	case M_CTL:
1793 		(void) putq(q, mp);
1794 		qenable(q);
1795 		break;
1796 
1797 	case M_FLUSH:		/* canonical flush handling */
1798 		/* XXX Should these be FLUSHALL? */
1799 		if (*mp->b_rptr & FLUSHW)
1800 			flushq(q, 0);
1801 		if (*mp->b_rptr & FLUSHR) {
1802 			flushq(RD(q), 0);
1803 			*mp->b_rptr &= ~FLUSHW;
1804 			qreply(q, mp);
1805 		} else
1806 			freemsg(mp);
1807 		break;
1808 
1809 	case M_PROTO:
1810 	case M_PCPROTO:
1811 		/* these rely on wsrv single threading per queue */
1812 		(void) putq(q, mp);
1813 		qenable(q);
1814 		break;
1815 
1816 	default:
1817 #ifdef GLD_DEBUG
1818 		if (gld_debug & GLDETRACE)
1819 			cmn_err(CE_WARN,
1820 			    "gld: Unexpected packet type from queue: 0x%x",
1821 			    DB_TYPE(mp));
1822 #endif
1823 		freemsg(mp);
1824 	}
1825 	return (0);
1826 }
1827 
1828 /*
1829  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1830  * specification.
1831  *
1832  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1833  * lock for reading data items that are only ever written by us.
1834  */
1835 
1836 int
1837 gld_wsrv(queue_t *q)
1838 {
1839 	mblk_t *mp;
1840 	gld_t *gld = (gld_t *)q->q_ptr;
1841 	gld_mac_info_t *macinfo;
1842 	union DL_primitives *prim;
1843 	int err;
1844 	boolean_t multidata;
1845 	uint32_t upri;
1846 
1847 #ifdef GLD_DEBUG
1848 	if (gld_debug & GLDTRACE)
1849 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1850 #endif
1851 
1852 	ASSERT(!gld->gld_in_wsrv);
1853 
1854 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1855 
1856 	if (q->q_first == NULL)
1857 		return (0);
1858 
1859 	macinfo = gld->gld_mac_info;
1860 
1861 	/*
1862 	 * Help wput avoid a call to gld_start if there might be a message
1863 	 * previously queued by that thread being processed here.
1864 	 */
1865 	gld->gld_in_wsrv = B_TRUE;
1866 	membar_enter();
1867 
1868 	while ((mp = getq(q)) != NULL) {
1869 		switch (DB_TYPE(mp)) {
1870 		case M_DATA:
1871 		case M_MULTIDATA:
1872 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1873 
1874 			/*
1875 			 * retry of a previously processed UNITDATA_REQ
1876 			 * or is a RAW or FAST message from above.
1877 			 */
1878 			if (macinfo == NULL) {
1879 				/* No longer attached to a PPA, drop packet */
1880 				freemsg(mp);
1881 				break;
1882 			}
1883 
1884 			gld->gld_sched_ran = B_FALSE;
1885 			membar_enter();
1886 
1887 			/*
1888 			 * Get the priority value. Note that in raw mode, the
1889 			 * per-packet priority value kept in b_band is ignored.
1890 			 */
1891 			upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1892 			    UPRI(gld, mp->b_band);
1893 
1894 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1895 			    gld_start(q, mp, GLD_WSRV, upri);
1896 			if (err == GLD_NORESOURCES) {
1897 				/* gld_sched will qenable us later */
1898 				gld->gld_xwait = B_TRUE; /* want qenable */
1899 				membar_enter();
1900 				/*
1901 				 * v2:  we're not holding the lock; it's
1902 				 * possible that the driver could have already
1903 				 * called gld_sched (following up on its
1904 				 * return of GLD_NORESOURCES), before we got a
1905 				 * chance to do the putbq() and set gld_xwait.
1906 				 * So if we saw a call to gld_sched that
1907 				 * examined this queue, since our call to
1908 				 * gld_start() above, then it's possible we've
1909 				 * already seen the only call to gld_sched()
1910 				 * we're ever going to see.  So we better retry
1911 				 * transmitting this packet right now.
1912 				 */
1913 				if (gld->gld_sched_ran) {
1914 #ifdef GLD_DEBUG
1915 					if (gld_debug & GLDTRACE)
1916 						cmn_err(CE_NOTE, "gld_wsrv: "
1917 						    "sched was called");
1918 #endif
1919 					break;	/* try again right now */
1920 				}
1921 				gld->gld_in_wsrv = B_FALSE;
1922 				return (0);
1923 			}
1924 			break;
1925 
1926 		case M_IOCTL:
1927 			(void) gld_ioctl(q, mp);
1928 			break;
1929 
1930 		case M_CTL:
1931 			if (macinfo == NULL) {
1932 				freemsg(mp);
1933 				break;
1934 			}
1935 
1936 			if (macinfo->gldm_mctl != NULL) {
1937 				GLDM_LOCK(macinfo, RW_WRITER);
1938 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1939 				GLDM_UNLOCK(macinfo);
1940 			} else {
1941 				/* This driver doesn't recognize, just drop */
1942 				freemsg(mp);
1943 			}
1944 			break;
1945 
1946 		case M_PROTO:	/* Will be an DLPI message of some type */
1947 		case M_PCPROTO:
1948 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1949 				if (err == GLDE_RETRY) {
1950 					gld->gld_in_wsrv = B_FALSE;
1951 					return (0); /* quit while we're ahead */
1952 				}
1953 				prim = (union DL_primitives *)mp->b_rptr;
1954 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1955 			}
1956 			break;
1957 
1958 		default:
1959 			/* This should never happen */
1960 #ifdef GLD_DEBUG
1961 			if (gld_debug & GLDERRS)
1962 				cmn_err(CE_WARN,
1963 				    "gld_wsrv: db_type(%x) not supported",
1964 				    mp->b_datap->db_type);
1965 #endif
1966 			freemsg(mp);	/* unknown types are discarded */
1967 			break;
1968 		}
1969 	}
1970 
1971 	membar_exit();
1972 	gld->gld_in_wsrv = B_FALSE;
1973 	return (0);
1974 }
1975 
1976 /*
1977  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1978  *
1979  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1980  *
1981  * In particular, we must avoid calling gld_precv*() if we came from wput().
1982  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1983  * packets to the receive side if we are in physical promiscuous mode.
1984  * Since the receive side holds a lock across its call to the upstream
1985  * putnext, and that upstream module could well have looped back to our
1986  * wput() routine on the same thread, we cannot call gld_precv* from here
1987  * for fear of causing a recursive lock entry in our receive code.
1988  *
1989  * There is a problem here when coming from gld_wput().  While wput
1990  * only comes here if the queue is attached to a PPA and bound to a SAP
1991  * and there are no messages on the queue ahead of the M_DATA that could
1992  * change that, it is theoretically possible that another thread could
1993  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1994  * could wake up and process them, before we finish processing this
1995  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1996  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1997  * and Style 1 streams only DL_DETACH in the close routine, where
1998  * qprocsoff() protects us.  If this happens we could end up calling
1999  * gldm_send() after we have detached the stream and possibly called
2000  * gldm_stop().  Worse, once the number of attached streams goes to zero,
2001  * detach/unregister could be called, and the macinfo could go away entirely.
2002  *
2003  * No one has ever seen this happen.
2004  *
2005  * It is some trouble to fix this, and we would rather not add any mutex
2006  * logic into the wput() routine, which is supposed to be a "fast"
2007  * path.
2008  *
2009  * What I've done is use an atomic counter to keep a count of the number
2010  * of threads currently calling gld_start() from wput() on this stream.
2011  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
2012  * the queue and qenables, hoping to have better luck next time.  Since
2013  * people shouldn't be trying to send after they've asked to DL_DETACH,
2014  * hopefully very soon all the wput=>start threads should have returned
2015  * and the DL_DETACH will succeed.  It's hard to test this since the odds
2016  * of the failure even trying to happen are so small.  I probably could
2017  * have ignored the whole issue and never been the worse for it.
2018  *
2019  * Because some GLDv2 Ethernet drivers do not allow the size of transmitted
2020  * packet to be greater than ETHERMAX, we must first strip the VLAN tag
2021  * from a tagged packet before passing it to the driver's gld_send() entry
2022  * point function, and pass the VLAN tag as a separate argument. The
2023  * gld_send() function may fail. In that case, the packet will need to be
2024  * queued in order to be processed again in GLD's service routine. As the
2025  * VTAG has already been stripped at that time, we save the VTAG information
2026  * in (the unused fields of) dblk using GLD_SAVE_MBLK_VTAG(), so that the
2027  * VTAG can also be queued and be able to be got when gld_start() is called
2028  * next time from gld_wsrv().
2029  *
2030  * Some rules to use GLD_{CLEAR|SAVE}_MBLK_VTAG macros:
2031  *
2032  * - GLD_SAVE_MBLK_VTAG() must be called to save the VTAG information each time
2033  *   the message is queued by putbq().
2034  *
2035  * - GLD_CLEAR_MBLK_VTAG() must be called to clear the bogus VTAG information
2036  *   (if any) in dblk before the message is passed to the gld_start() function.
2037  */
2038 static int
2039 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
2040 {
2041 	mblk_t *nmp;
2042 	gld_t *gld = (gld_t *)q->q_ptr;
2043 	gld_mac_info_t *macinfo;
2044 	gld_mac_pvt_t *mac_pvt;
2045 	int rc;
2046 	gld_interface_t *ifp;
2047 	pktinfo_t pktinfo;
2048 	uint32_t vtag, vid;
2049 	uint32_t raw_vtag = 0;
2050 	gld_vlan_t *vlan;
2051 	struct gld_stats *stats0, *stats = NULL;
2052 
2053 	ASSERT(DB_TYPE(mp) == M_DATA);
2054 	macinfo = gld->gld_mac_info;
2055 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2056 	ifp = mac_pvt->interfacep;
2057 	vlan = (gld_vlan_t *)gld->gld_vlan;
2058 	vid = vlan->gldv_id;
2059 
2060 	/*
2061 	 * If this interface is a VLAN, the kstats of corresponding
2062 	 * "VLAN 0" should also be updated. Note that the gld_vlan_t
2063 	 * structure for VLAN 0 might not exist if there are no DLPI
2064 	 * consumers attaching on VLAN 0. Fortunately we can directly
2065 	 * access VLAN 0's kstats from macinfo.
2066 	 *
2067 	 * Therefore, stats0 (VLAN 0's kstats) must always be
2068 	 * updated, and stats must to be updated if it is not NULL.
2069 	 */
2070 	stats0 = mac_pvt->statistics;
2071 	if (vid != VLAN_VID_NONE)
2072 		stats = vlan->gldv_stats;
2073 
2074 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2075 #ifdef GLD_DEBUG
2076 		if (gld_debug & GLDERRS)
2077 			cmn_err(CE_WARN,
2078 			    "gld_start: failed to interpret outbound packet");
2079 #endif
2080 		goto badarg;
2081 	}
2082 
2083 	vtag = VLAN_VID_NONE;
2084 	raw_vtag = GLD_GET_MBLK_VTAG(mp);
2085 	if (GLD_VTAG_TCI(raw_vtag) != 0) {
2086 		uint16_t raw_pri, raw_vid, evid;
2087 
2088 		/*
2089 		 * Tagged packet.
2090 		 */
2091 		raw_pri = GLD_VTAG_PRI(raw_vtag);
2092 		raw_vid = GLD_VTAG_VID(raw_vtag);
2093 		GLD_CLEAR_MBLK_VTAG(mp);
2094 
2095 		if (gld->gld_flags & GLD_RAW) {
2096 			/*
2097 			 * In raw mode, we only expect untagged packets or
2098 			 * special priority-tagged packets on a VLAN stream.
2099 			 * Drop the packet if its VID is not zero.
2100 			 */
2101 			if (vid != VLAN_VID_NONE && raw_vid != VLAN_VID_NONE)
2102 				goto badarg;
2103 
2104 			/*
2105 			 * If it is raw mode, use the per-stream priority if
2106 			 * the priority is not specified in the packet.
2107 			 * Otherwise, ignore the priority bits in the packet.
2108 			 */
2109 			upri = (raw_pri != 0) ? raw_pri : upri;
2110 		}
2111 
2112 		if (vid == VLAN_VID_NONE && vid != raw_vid) {
2113 			gld_vlan_t *tmp_vlan;
2114 
2115 			/*
2116 			 * This link is a physical link but the packet is
2117 			 * a VLAN tagged packet, the kstats of corresponding
2118 			 * VLAN (if any) should also be updated.
2119 			 */
2120 			tmp_vlan = gld_find_vlan(macinfo, raw_vid);
2121 			if (tmp_vlan != NULL)
2122 				stats = tmp_vlan->gldv_stats;
2123 		}
2124 
2125 		evid = (vid == VLAN_VID_NONE) ? raw_vid : vid;
2126 		if (evid != VLAN_VID_NONE || upri != 0)
2127 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, evid);
2128 	} else {
2129 		/*
2130 		 * Untagged packet:
2131 		 * Get vtag from the attached PPA of this stream.
2132 		 */
2133 		if ((vid != VLAN_VID_NONE) ||
2134 		    ((macinfo->gldm_type == DL_ETHER) && (upri != 0))) {
2135 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, vid);
2136 		}
2137 	}
2138 
2139 	/*
2140 	 * We're not holding the lock for this check.  If the promiscuous
2141 	 * state is in flux it doesn't matter much if we get this wrong.
2142 	 */
2143 	if (mac_pvt->nprom > 0) {
2144 		/*
2145 		 * We want to loopback to the receive side, but to avoid
2146 		 * recursive lock entry:  if we came from wput(), which
2147 		 * could have looped back via IP from our own receive
2148 		 * interrupt thread, we decline this request.  wput()
2149 		 * will then queue the packet for wsrv().  This means
2150 		 * that when snoop is running we don't get the advantage
2151 		 * of the wput() multithreaded direct entry to the
2152 		 * driver's send routine.
2153 		 */
2154 		if (caller == GLD_WPUT) {
2155 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2156 			(void) putbq(q, mp);
2157 			return (GLD_NORESOURCES);
2158 		}
2159 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2160 			nmp = dupmsg_noloan(mp);
2161 		else
2162 			nmp = dupmsg(mp);
2163 	} else
2164 		nmp = NULL;		/* we need no loopback */
2165 
2166 	if (ifp->hdr_size > 0 &&
2167 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2168 	    macinfo->gldm_maxpkt) {
2169 		if (nmp)
2170 			freemsg(nmp);	/* free the duped message */
2171 #ifdef GLD_DEBUG
2172 		if (gld_debug & GLDERRS)
2173 			cmn_err(CE_WARN,
2174 			    "gld_start: oversize outbound packet, size %d,"
2175 			    "max %d", pktinfo.pktLen,
2176 			    ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2177 			    macinfo->gldm_maxpkt);
2178 #endif
2179 		goto badarg;
2180 	}
2181 
2182 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2183 
2184 	if (rc != GLD_SUCCESS) {
2185 		if (rc == GLD_NORESOURCES) {
2186 			ATOMIC_BUMP(stats0, stats, glds_xmtretry, 1);
2187 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2188 			(void) putbq(q, mp);
2189 		} else {
2190 			/* transmit error; drop the packet */
2191 			freemsg(mp);
2192 			/* We're supposed to count failed attempts as well */
2193 			UPDATE_STATS(stats0, stats, pktinfo, 1);
2194 #ifdef GLD_DEBUG
2195 			if (gld_debug & GLDERRS)
2196 				cmn_err(CE_WARN,
2197 				    "gld_start: gldm_send failed %d", rc);
2198 #endif
2199 		}
2200 		if (nmp)
2201 			freemsg(nmp);	/* free the dupped message */
2202 		return (rc);
2203 	}
2204 
2205 	UPDATE_STATS(stats0, stats, pktinfo, 1);
2206 
2207 	/*
2208 	 * Loopback case. The message needs to be returned back on
2209 	 * the read side. This would silently fail if the dupmsg fails
2210 	 * above. This is probably OK, if there is no memory to dup the
2211 	 * block, then there isn't much we could do anyway.
2212 	 */
2213 	if (nmp) {
2214 		GLDM_LOCK(macinfo, RW_WRITER);
2215 		gld_precv(macinfo, nmp, vtag, stats);
2216 		GLDM_UNLOCK(macinfo);
2217 	}
2218 
2219 	return (GLD_SUCCESS);
2220 badarg:
2221 	freemsg(mp);
2222 
2223 	ATOMIC_BUMP(stats0, stats, glds_xmtbadinterp, 1);
2224 	return (GLD_BADARG);
2225 }
2226 
2227 /*
2228  * With MDT V.2 a single message mp can have one header area and multiple
2229  * payload areas. A packet is described by dl_pkt_info, and each packet can
2230  * span multiple payload areas (currently with TCP, each packet will have one
2231  * header and at the most two payload areas). MACs might have a limit on the
2232  * number of payload segments (i.e. per packet scatter-gather limit), and
2233  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2234  * might also have a limit on the total number of payloads in a message, and
2235  * that is specified by mdt_max_pld.
2236  */
2237 static int
2238 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2239 {
2240 	mblk_t *nextmp;
2241 	gld_t *gld = (gld_t *)q->q_ptr;
2242 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2243 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2244 	int numpacks, mdtpacks;
2245 	gld_interface_t *ifp = mac_pvt->interfacep;
2246 	pktinfo_t pktinfo;
2247 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2248 	boolean_t doloop = B_FALSE;
2249 	multidata_t *dlmdp;
2250 	pdescinfo_t pinfo;
2251 	pdesc_t *dl_pkt;
2252 	void *cookie;
2253 	uint_t totLen = 0;
2254 
2255 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2256 
2257 	/*
2258 	 * We're not holding the lock for this check.  If the promiscuous
2259 	 * state is in flux it doesn't matter much if we get this wrong.
2260 	 */
2261 	if (mac_pvt->nprom > 0) {
2262 		/*
2263 		 * We want to loopback to the receive side, but to avoid
2264 		 * recursive lock entry:  if we came from wput(), which
2265 		 * could have looped back via IP from our own receive
2266 		 * interrupt thread, we decline this request.  wput()
2267 		 * will then queue the packet for wsrv().  This means
2268 		 * that when snoop is running we don't get the advantage
2269 		 * of the wput() multithreaded direct entry to the
2270 		 * driver's send routine.
2271 		 */
2272 		if (caller == GLD_WPUT) {
2273 			(void) putbq(q, mp);
2274 			return (GLD_NORESOURCES);
2275 		}
2276 		doloop = B_TRUE;
2277 
2278 		/*
2279 		 * unlike the M_DATA case, we don't have to call
2280 		 * dupmsg_noloan here because mmd_transform
2281 		 * (called by gld_precv_mdt) will make a copy of
2282 		 * each dblk.
2283 		 */
2284 	}
2285 
2286 	while (mp != NULL) {
2287 		/*
2288 		 * The lower layer driver only gets a single multidata
2289 		 * message; this also makes it easier to handle noresources.
2290 		 */
2291 		nextmp = mp->b_cont;
2292 		mp->b_cont = NULL;
2293 
2294 		/*
2295 		 * Get number of packets in this message; if nothing
2296 		 * to transmit, go to next message.
2297 		 */
2298 		dlmdp = mmd_getmultidata(mp);
2299 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2300 			freemsg(mp);
2301 			mp = nextmp;
2302 			continue;
2303 		}
2304 
2305 		/*
2306 		 * Run interpreter to populate media specific pktinfo fields.
2307 		 * This collects per MDT message information like sap,
2308 		 * broad/multicast etc.
2309 		 */
2310 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2311 		    GLD_MDT_TX);
2312 
2313 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2314 
2315 		if (numpacks > 0) {
2316 			/*
2317 			 * Driver indicates it can transmit at least 1, and
2318 			 * possibly all, packets in MDT message.
2319 			 */
2320 			int count = numpacks;
2321 
2322 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2323 			    (dl_pkt != NULL);
2324 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2325 				/*
2326 				 * Format this packet by adding link header and
2327 				 * adjusting pdescinfo to include it; get
2328 				 * packet length.
2329 				 */
2330 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2331 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2332 
2333 				totLen += pktinfo.pktLen;
2334 
2335 				/*
2336 				 * Loop back packet before handing to the
2337 				 * driver.
2338 				 */
2339 				if (doloop &&
2340 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2341 					GLDM_LOCK(macinfo, RW_WRITER);
2342 					gld_precv_mdt(macinfo, vlan, mp,
2343 					    dl_pkt, &pktinfo);
2344 					GLDM_UNLOCK(macinfo);
2345 				}
2346 
2347 				/*
2348 				 * And send off to driver.
2349 				 */
2350 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2351 				    &pinfo);
2352 
2353 				/*
2354 				 * Be careful not to invoke getnextpdesc if we
2355 				 * already sent the last packet, since driver
2356 				 * might have posted it to hardware causing a
2357 				 * completion and freemsg() so the MDT data
2358 				 * structures might not be valid anymore.
2359 				 */
2360 				if (--count == 0)
2361 					break;
2362 			}
2363 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2364 			pktinfo.pktLen = totLen;
2365 			UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, numpacks);
2366 
2367 			/*
2368 			 * In the noresources case (when driver indicates it
2369 			 * can not transmit all packets in the MDT message),
2370 			 * adjust to skip the first few packets on retrial.
2371 			 */
2372 			if (numpacks != mdtpacks) {
2373 				/*
2374 				 * Release already processed packet descriptors.
2375 				 */
2376 				for (count = 0; count < numpacks; count++) {
2377 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2378 					    &pinfo);
2379 					mmd_rempdesc(dl_pkt);
2380 				}
2381 				ATOMIC_BUMP(vlan->gldv_stats, NULL,
2382 				    glds_xmtretry, 1);
2383 				mp->b_cont = nextmp;
2384 				(void) putbq(q, mp);
2385 				return (GLD_NORESOURCES);
2386 			}
2387 		} else if (numpacks == 0) {
2388 			/*
2389 			 * Driver indicates it can not transmit any packets
2390 			 * currently and will request retrial later.
2391 			 */
2392 			ATOMIC_BUMP(vlan->gldv_stats, NULL, glds_xmtretry, 1);
2393 			mp->b_cont = nextmp;
2394 			(void) putbq(q, mp);
2395 			return (GLD_NORESOURCES);
2396 		} else {
2397 			ASSERT(numpacks == -1);
2398 			/*
2399 			 * We're supposed to count failed attempts as well.
2400 			 */
2401 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2402 			while (dl_pkt != NULL) {
2403 				/*
2404 				 * Call interpreter to determine total packet
2405 				 * bytes that are being dropped.
2406 				 */
2407 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2408 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2409 
2410 				totLen += pktinfo.pktLen;
2411 
2412 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2413 			}
2414 			pktinfo.pktLen = totLen;
2415 			UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, mdtpacks);
2416 
2417 			/*
2418 			 * Transmit error; drop the message, move on
2419 			 * to the next one.
2420 			 */
2421 			freemsg(mp);
2422 		}
2423 
2424 		/*
2425 		 * Process the next multidata block, if there is one.
2426 		 */
2427 		mp = nextmp;
2428 	}
2429 
2430 	return (GLD_SUCCESS);
2431 }
2432 
2433 /*
2434  * gld_intr (macinfo)
2435  */
2436 uint_t
2437 gld_intr(gld_mac_info_t *macinfo)
2438 {
2439 	ASSERT(macinfo != NULL);
2440 
2441 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2442 		return (DDI_INTR_UNCLAIMED);
2443 
2444 	return ((*macinfo->gldm_intr)(macinfo));
2445 }
2446 
2447 /*
2448  * gld_sched (macinfo)
2449  *
2450  * This routine scans the streams that refer to a specific macinfo
2451  * structure and causes the STREAMS scheduler to try to run them if
2452  * they are marked as waiting for the transmit buffer.
2453  */
2454 void
2455 gld_sched(gld_mac_info_t *macinfo)
2456 {
2457 	gld_mac_pvt_t *mac_pvt;
2458 	gld_t *gld;
2459 	gld_vlan_t *vlan;
2460 	int i;
2461 
2462 	ASSERT(macinfo != NULL);
2463 
2464 	GLDM_LOCK(macinfo, RW_WRITER);
2465 
2466 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2467 		/* We're probably being called from a leftover interrupt */
2468 		GLDM_UNLOCK(macinfo);
2469 		return;
2470 	}
2471 
2472 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2473 
2474 	for (i = 0; i < VLAN_HASHSZ; i++) {
2475 		for (vlan = mac_pvt->vlan_hash[i];
2476 		    vlan != NULL; vlan = vlan->gldv_next) {
2477 			for (gld = vlan->gldv_str_next;
2478 			    gld != (gld_t *)&vlan->gldv_str_next;
2479 			    gld = gld->gld_next) {
2480 				ASSERT(gld->gld_mac_info == macinfo);
2481 				gld->gld_sched_ran = B_TRUE;
2482 				membar_enter();
2483 				if (gld->gld_xwait) {
2484 					gld->gld_xwait = B_FALSE;
2485 					qenable(WR(gld->gld_qptr));
2486 				}
2487 			}
2488 		}
2489 	}
2490 
2491 	GLDM_UNLOCK(macinfo);
2492 }
2493 
2494 /*
2495  * gld_precv (macinfo, mp, vtag, stats)
2496  * called from gld_start to loopback a packet when in promiscuous mode
2497  *
2498  * VLAN 0's statistics need to be updated. If stats is not NULL,
2499  * it needs to be updated as well.
2500  */
2501 static void
2502 gld_precv(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag,
2503     struct gld_stats *stats)
2504 {
2505 	gld_mac_pvt_t *mac_pvt;
2506 	gld_interface_t *ifp;
2507 	pktinfo_t pktinfo;
2508 
2509 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2510 
2511 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2512 	ifp = mac_pvt->interfacep;
2513 
2514 	/*
2515 	 * call the media specific packet interpreter routine
2516 	 */
2517 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2518 		freemsg(mp);
2519 		BUMP(mac_pvt->statistics, stats, glds_rcvbadinterp, 1);
2520 #ifdef GLD_DEBUG
2521 		if (gld_debug & GLDERRS)
2522 			cmn_err(CE_WARN,
2523 			    "gld_precv: interpreter failed");
2524 #endif
2525 		return;
2526 	}
2527 
2528 	/*
2529 	 * Update the vtag information.
2530 	 */
2531 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2532 	pktinfo.vid = GLD_VTAG_VID(vtag);
2533 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2534 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2535 
2536 	gld_sendup(macinfo, &pktinfo, mp, gld_paccept);
2537 }
2538 
2539 /*
2540  * Called from gld_start_mdt to loopback packet(s) when in promiscuous mode.
2541  * Note that 'vlan' is always a physical link, because MDT can only be
2542  * enabled on non-VLAN streams.
2543  */
2544 /*ARGSUSED*/
2545 static void
2546 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2547     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2548 {
2549 	mblk_t *adjmp;
2550 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2551 	gld_interface_t *ifp = mac_pvt->interfacep;
2552 
2553 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2554 
2555 	/*
2556 	 * Get source/destination.
2557 	 */
2558 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2559 	    GLD_MDT_RXLOOP);
2560 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2561 		gld_sendup(macinfo, pktinfo, adjmp, gld_paccept);
2562 }
2563 
2564 /*
2565  * gld_recv (macinfo, mp)
2566  * called with an mac-level packet in a mblock; take the maclock,
2567  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2568  *
2569  * V0 drivers already are holding the mutex when they call us.
2570  */
2571 void
2572 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2573 {
2574 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2575 }
2576 
2577 void
2578 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2579 {
2580 	gld_mac_pvt_t *mac_pvt;
2581 	char pbuf[3*GLD_MAX_ADDRLEN];
2582 	pktinfo_t pktinfo;
2583 	gld_interface_t *ifp;
2584 	queue_t *ipq = NULL;
2585 	gld_vlan_t *vlan = NULL, *vlan0 = NULL, *vlann = NULL;
2586 	struct gld_stats *stats0, *stats = NULL;
2587 	uint32_t vid;
2588 	int err;
2589 
2590 	ASSERT(macinfo != NULL);
2591 	ASSERT(mp->b_datap->db_ref);
2592 
2593 	GLDM_LOCK(macinfo, RW_READER);
2594 
2595 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2596 		/* We're probably being called from a leftover interrupt */
2597 		freemsg(mp);
2598 		goto done;
2599 	}
2600 
2601 	/*
2602 	 * If this packet is a VLAN tagged packet, the kstats of corresponding
2603 	 * "VLAN 0" should also be updated. We can directly access VLAN 0's
2604 	 * kstats from macinfo.
2605 	 *
2606 	 * Further, the packets needs to be passed to VLAN 0 if there is
2607 	 * any DLPI consumer on VLAN 0 who is interested in tagged packets
2608 	 * (DL_PROMISC_SAP is on or is bounded to ETHERTYPE_VLAN SAP).
2609 	 */
2610 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2611 	stats0 = mac_pvt->statistics;
2612 
2613 	vid = GLD_VTAG_VID(vtag);
2614 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2615 	if (vid != VLAN_VID_NONE) {
2616 		/*
2617 		 * If there are no physical DLPI consumers interested in the
2618 		 * VLAN packet, clear vlan0.
2619 		 */
2620 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2621 			vlan0 = NULL;
2622 		/*
2623 		 * vlann is the VLAN with the same VID as the VLAN packet.
2624 		 */
2625 		vlann = gld_find_vlan(macinfo, vid);
2626 		if (vlann != NULL)
2627 			stats = vlann->gldv_stats;
2628 	}
2629 
2630 	vlan = (vid == VLAN_VID_NONE) ? vlan0 : vlann;
2631 
2632 	ifp = mac_pvt->interfacep;
2633 	err = (*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXQUICK);
2634 
2635 	BUMP(stats0, stats, glds_bytercv64, pktinfo.pktLen);
2636 	BUMP(stats0, stats, glds_pktrcv64, 1);
2637 
2638 	if ((vlann == NULL) && (vlan0 == NULL)) {
2639 		freemsg(mp);
2640 		goto done;
2641 	}
2642 
2643 	/*
2644 	 * Check whether underlying media code supports the IPQ hack:
2645 	 *
2646 	 * - the interpreter could quickly parse the packet
2647 	 * - the device type supports IPQ (ethernet and IPoIB)
2648 	 * - there is one, and only one, IP stream bound (to this VLAN)
2649 	 * - that stream is a "fastpath" stream
2650 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2651 	 * - there are no streams in promiscuous mode (on this VLAN)
2652 	 * - if this packet is tagged, there is no need to send this
2653 	 *   packet to physical streams
2654 	 */
2655 	if ((err != 0) && ((vlan != NULL) && (vlan->gldv_nprom == 0)) &&
2656 	    (vlan == vlan0 || vlan0 == NULL)) {
2657 		switch (pktinfo.ethertype) {
2658 		case ETHERTYPE_IP:
2659 			ipq = vlan->gldv_ipq;
2660 			break;
2661 		case ETHERTYPE_IPV6:
2662 			ipq = vlan->gldv_ipv6q;
2663 			break;
2664 		}
2665 	}
2666 
2667 	/*
2668 	 * Special case for IP; we can simply do the putnext here, if:
2669 	 * o The IPQ hack is possible (ipq != NULL).
2670 	 * o the packet is specifically for me, and therefore:
2671 	 * - the packet is not multicast or broadcast (fastpath only
2672 	 *   wants unicast packets).
2673 	 *
2674 	 * o the stream is not asserting flow control.
2675 	 */
2676 	if (ipq != NULL &&
2677 	    pktinfo.isForMe &&
2678 	    canputnext(ipq)) {
2679 		/*
2680 		 * Skip the mac header. We know there is no LLC1/SNAP header
2681 		 * in this packet
2682 		 */
2683 		mp->b_rptr += pktinfo.macLen;
2684 		putnext(ipq, mp);
2685 		goto done;
2686 	}
2687 
2688 	/*
2689 	 * call the media specific packet interpreter routine
2690 	 */
2691 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2692 		BUMP(stats0, stats, glds_rcvbadinterp, 1);
2693 #ifdef GLD_DEBUG
2694 		if (gld_debug & GLDERRS)
2695 			cmn_err(CE_WARN,
2696 			    "gld_recv_tagged: interpreter failed");
2697 #endif
2698 		freemsg(mp);
2699 		goto done;
2700 	}
2701 
2702 	/*
2703 	 * This is safe even if vtag is VLAN_VTAG_NONE
2704 	 */
2705 	pktinfo.vid = vid;
2706 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2707 #ifdef GLD_DEBUG
2708 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2709 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2710 #endif
2711 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2712 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2713 
2714 #ifdef GLD_DEBUG
2715 	if ((gld_debug & GLDRECV) &&
2716 	    (!(gld_debug & GLDNOBR) ||
2717 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2718 		char pbuf2[3*GLD_MAX_ADDRLEN];
2719 
2720 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2721 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2722 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2723 		    pktinfo.dhost, macinfo->gldm_addrlen));
2724 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2725 		    pktinfo.vid,
2726 		    pktinfo.user_pri);
2727 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2728 		    "Hdr: %d,%d isMulticast: %s\n",
2729 		    pktinfo.ethertype,
2730 		    pktinfo.pktLen,
2731 		    pktinfo.macLen,
2732 		    pktinfo.hdrLen,
2733 		    pktinfo.isMulticast ? "Y" : "N");
2734 	}
2735 #endif
2736 
2737 	gld_sendup(macinfo, &pktinfo, mp, gld_accept);
2738 
2739 done:
2740 	GLDM_UNLOCK(macinfo);
2741 }
2742 
2743 /* =================================================================== */
2744 /* receive group: called from gld_recv and gld_precv* with maclock held */
2745 /* =================================================================== */
2746 
2747 /*
2748  * Search all the streams attached to the specified VLAN looking for
2749  * those eligible to receive the packet.
2750  * Note that in order to avoid an extra dupmsg(), if this is the first
2751  * eligible stream, remember it (in fgldp) so that we can send up the
2752  * message after this function.
2753  *
2754  * Return errno if fails. Currently the only error is ENOMEM.
2755  */
2756 static int
2757 gld_sendup_vlan(gld_vlan_t *vlan, pktinfo_t *pktinfo, mblk_t *mp,
2758     int (*acceptfunc)(), void (*send)(), int (*cansend)(), gld_t **fgldp)
2759 {
2760 	mblk_t *nmp;
2761 	gld_t *gld;
2762 	int err = 0;
2763 
2764 	ASSERT(vlan != NULL);
2765 	for (gld = vlan->gldv_str_next; gld != (gld_t *)&vlan->gldv_str_next;
2766 	    gld = gld->gld_next) {
2767 #ifdef GLD_VERBOSE_DEBUG
2768 		cmn_err(CE_NOTE, "gld_sendup_vlan: SAP: %4x QPTR: %p "
2769 		    "QSTATE: %s", gld->gld_sap, (void *)gld->gld_qptr,
2770 		    gld->gld_state == DL_IDLE ? "IDLE" : "NOT IDLE");
2771 #endif
2772 		ASSERT(gld->gld_qptr != NULL);
2773 		ASSERT(gld->gld_state == DL_IDLE ||
2774 		    gld->gld_state == DL_UNBOUND);
2775 		ASSERT(gld->gld_vlan == vlan);
2776 
2777 		if (gld->gld_state != DL_IDLE)
2778 			continue;	/* not eligible to receive */
2779 		if (gld->gld_flags & GLD_STR_CLOSING)
2780 			continue;	/* not eligible to receive */
2781 
2782 #ifdef GLD_DEBUG
2783 		if ((gld_debug & GLDRECV) &&
2784 		    (!(gld_debug & GLDNOBR) ||
2785 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2786 			cmn_err(CE_NOTE,
2787 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2788 			    gld->gld_sap,
2789 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2790 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2791 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2792 #endif
2793 
2794 		/*
2795 		 * The accept function differs depending on whether this is
2796 		 * a packet that we received from the wire or a loopback.
2797 		 */
2798 		if ((*acceptfunc)(gld, pktinfo)) {
2799 			/* sap matches */
2800 			pktinfo->wasAccepted = 1; /* known protocol */
2801 
2802 			if (!(*cansend)(gld->gld_qptr)) {
2803 				/*
2804 				 * Upper stream is not accepting messages, i.e.
2805 				 * it is flow controlled, therefore we will
2806 				 * forgo sending the message up this stream.
2807 				 */
2808 #ifdef GLD_DEBUG
2809 				if (gld_debug & GLDETRACE)
2810 					cmn_err(CE_WARN,
2811 					    "gld_sendup: canput failed");
2812 #endif
2813 				BUMP(vlan->gldv_stats, NULL, glds_blocked, 1);
2814 				qenable(gld->gld_qptr);
2815 				continue;
2816 			}
2817 
2818 			/*
2819 			 * In order to avoid an extra dupmsg(), remember this
2820 			 * gld if this is the first eligible stream.
2821 			 */
2822 			if (*fgldp == NULL) {
2823 				*fgldp = gld;
2824 				continue;
2825 			}
2826 
2827 			/* duplicate the packet for this stream */
2828 			nmp = dupmsg(mp);
2829 			if (nmp == NULL) {
2830 				BUMP(vlan->gldv_stats, NULL,
2831 				    glds_gldnorcvbuf, 1);
2832 #ifdef GLD_DEBUG
2833 				if (gld_debug & GLDERRS)
2834 					cmn_err(CE_WARN,
2835 					    "gld_sendup: dupmsg failed");
2836 #endif
2837 				/* couldn't get resources; drop it */
2838 				err = ENOMEM;
2839 				break;
2840 			}
2841 			/* pass the message up the stream */
2842 			gld_passon(gld, nmp, pktinfo, send);
2843 		}
2844 	}
2845 	return (err);
2846 }
2847 
2848 /*
2849  * gld_sendup (macinfo, pktinfo, mp, acceptfunc)
2850  * called with an ethernet packet in an mblk; must decide whether
2851  * packet is for us and which streams to queue it to.
2852  */
2853 static void
2854 gld_sendup(gld_mac_info_t *macinfo, pktinfo_t *pktinfo,
2855     mblk_t *mp, int (*acceptfunc)())
2856 {
2857 	gld_t *fgld = NULL;
2858 	void (*send)(queue_t *qp, mblk_t *mp);
2859 	int (*cansend)(queue_t *qp);
2860 	gld_vlan_t *vlan0, *vlann = NULL;
2861 	struct gld_stats *stats0, *stats = NULL;
2862 	int err = 0;
2863 
2864 #ifdef GLD_DEBUG
2865 	if (gld_debug & GLDTRACE)
2866 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2867 		    (void *)macinfo);
2868 #endif
2869 
2870 	ASSERT(mp != NULL);
2871 	ASSERT(macinfo != NULL);
2872 	ASSERT(pktinfo != NULL);
2873 	ASSERT(GLDM_LOCK_HELD(macinfo));
2874 
2875 	/*
2876 	 * The tagged packets should also be looped back (transmit-side)
2877 	 * or sent up (receive-side) to VLAN 0 if VLAN 0 is set to
2878 	 * DL_PROMISC_SAP or there is any DLPI consumer bind to the
2879 	 * ETHERTYPE_VLAN SAP. The kstats of VLAN 0 needs to be updated
2880 	 * as well.
2881 	 */
2882 	stats0 = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->statistics;
2883 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2884 	if (pktinfo->vid != VLAN_VID_NONE) {
2885 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2886 			vlan0 = NULL;
2887 		vlann = gld_find_vlan(macinfo, pktinfo->vid);
2888 		if (vlann != NULL)
2889 			stats = vlann->gldv_stats;
2890 	}
2891 
2892 	ASSERT((vlan0 != NULL) || (vlann != NULL));
2893 
2894 	/*
2895 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2896 	 * gld_recv returns to the caller's interrupt routine.  The total
2897 	 * network throughput would normally be lower when selecting this
2898 	 * option, because we putq the messages and process them later,
2899 	 * instead of sending them with putnext now.  Some time critical
2900 	 * device might need this, so it's here but undocumented.
2901 	 */
2902 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2903 		send = (void (*)(queue_t *, mblk_t *))putq;
2904 		cansend = canput;
2905 	} else {
2906 		send = (void (*)(queue_t *, mblk_t *))putnext;
2907 		cansend = canputnext;
2908 	}
2909 
2910 	/*
2911 	 * Send the packets for all eligible streams.
2912 	 */
2913 	if (vlan0 != NULL) {
2914 		err = gld_sendup_vlan(vlan0, pktinfo, mp, acceptfunc, send,
2915 		    cansend, &fgld);
2916 	}
2917 	if ((err == 0) && (vlann != NULL)) {
2918 		err = gld_sendup_vlan(vlann, pktinfo, mp, acceptfunc, send,
2919 		    cansend, &fgld);
2920 	}
2921 
2922 	ASSERT(mp);
2923 	/* send the original dup of the packet up the first stream found */
2924 	if (fgld)
2925 		gld_passon(fgld, mp, pktinfo, send);
2926 	else
2927 		freemsg(mp);	/* no streams matched */
2928 
2929 	/* We do not count looped back packets */
2930 	if (acceptfunc == gld_paccept)
2931 		return;		/* transmit loopback case */
2932 
2933 	if (pktinfo->isBroadcast)
2934 		BUMP(stats0, stats, glds_brdcstrcv, 1);
2935 	else if (pktinfo->isMulticast)
2936 		BUMP(stats0, stats, glds_multircv, 1);
2937 
2938 	/* No stream accepted this packet */
2939 	if (!pktinfo->wasAccepted)
2940 		BUMP(stats0, stats, glds_unknowns, 1);
2941 }
2942 
2943 #define	GLD_IS_PHYS(gld)	\
2944 	(((gld_vlan_t *)gld->gld_vlan)->gldv_id == VLAN_VID_NONE)
2945 
2946 /*
2947  * A packet matches a stream if:
2948  *      The stream's VLAN id is the same as the one in the packet.
2949  *  and the stream accepts EtherType encoded packets and the type matches
2950  *  or  the stream accepts LLC packets and the packet is an LLC packet
2951  */
2952 #define	MATCH(stream, pktinfo) \
2953 	((((gld_vlan_t *)stream->gld_vlan)->gldv_id == pktinfo->vid) && \
2954 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2955 	(!stream->gld_ethertype && pktinfo->isLLC)))
2956 
2957 /*
2958  * This function validates a packet for sending up a particular
2959  * stream. The message header has been parsed and its characteristic
2960  * are recorded in the pktinfo data structure. The streams stack info
2961  * are presented in gld data structures.
2962  */
2963 static int
2964 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2965 {
2966 	/*
2967 	 * if there is no match do not bother checking further.
2968 	 * Note that it is okay to examine gld_vlan because
2969 	 * macinfo->gldm_lock is held.
2970 	 *
2971 	 * Because all tagged packets have SAP value ETHERTYPE_VLAN,
2972 	 * these packets will pass the SAP filter check if the stream
2973 	 * is a ETHERTYPE_VLAN listener.
2974 	 */
2975 	if ((!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP) &&
2976 	    !(GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
2977 	    pktinfo->isTagged)))
2978 		return (0);
2979 
2980 	/*
2981 	 * We don't accept any packet from the hardware if we originated it.
2982 	 * (Contrast gld_paccept, the send-loopback accept function.)
2983 	 */
2984 	if (pktinfo->isLooped)
2985 		return (0);
2986 
2987 	/*
2988 	 * If the packet is broadcast or sent to us directly we will accept it.
2989 	 * Also we will accept multicast packets requested by the stream.
2990 	 */
2991 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2992 	    gld_mcmatch(gld, pktinfo))
2993 		return (1);
2994 
2995 	/*
2996 	 * Finally, accept anything else if we're in promiscuous mode
2997 	 */
2998 	if (gld->gld_flags & GLD_PROM_PHYS)
2999 		return (1);
3000 
3001 	return (0);
3002 }
3003 
3004 /*
3005  * Return TRUE if the given multicast address is one
3006  * of those that this particular Stream is interested in.
3007  */
3008 static int
3009 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
3010 {
3011 	/*
3012 	 * Return FALSE if not a multicast address.
3013 	 */
3014 	if (!pktinfo->isMulticast)
3015 		return (0);
3016 
3017 	/*
3018 	 * Check if all multicasts have been enabled for this Stream
3019 	 */
3020 	if (gld->gld_flags & GLD_PROM_MULT)
3021 		return (1);
3022 
3023 	/*
3024 	 * Return FALSE if no multicast addresses enabled for this Stream.
3025 	 */
3026 	if (!gld->gld_mcast)
3027 		return (0);
3028 
3029 	/*
3030 	 * Otherwise, look for it in the table.
3031 	 */
3032 	return (gld_multicast(pktinfo->dhost, gld));
3033 }
3034 
3035 /*
3036  * gld_multicast determines if the address is a multicast address for
3037  * this stream.
3038  */
3039 static int
3040 gld_multicast(unsigned char *macaddr, gld_t *gld)
3041 {
3042 	int i;
3043 
3044 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
3045 
3046 	if (!gld->gld_mcast)
3047 		return (0);
3048 
3049 	for (i = 0; i < gld->gld_multicnt; i++) {
3050 		if (gld->gld_mcast[i]) {
3051 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
3052 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
3053 			    gld->gld_mac_info->gldm_addrlen))
3054 				return (1);
3055 		}
3056 	}
3057 
3058 	return (0);
3059 }
3060 
3061 /*
3062  * accept function for looped back packets
3063  */
3064 static int
3065 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
3066 {
3067 	/*
3068 	 * Note that it is okay to examine gld_vlan because macinfo->gldm_lock
3069 	 * is held.
3070 	 *
3071 	 * If a stream is a ETHERTYPE_VLAN listener, it must
3072 	 * accept all tagged packets as those packets have SAP value
3073 	 * ETHERTYPE_VLAN.
3074 	 */
3075 	return (gld->gld_flags & GLD_PROM_PHYS &&
3076 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP ||
3077 	    (GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
3078 	    pktinfo->isTagged)));
3079 
3080 }
3081 
3082 static void
3083 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
3084 	void (*send)(queue_t *qp, mblk_t *mp))
3085 {
3086 	boolean_t is_phys = GLD_IS_PHYS(gld);
3087 	int skiplen;
3088 	boolean_t addtag = B_FALSE;
3089 	uint32_t vtag = 0;
3090 
3091 #ifdef GLD_DEBUG
3092 	if (gld_debug & GLDTRACE)
3093 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
3094 		    (void *)mp, (void *)pktinfo);
3095 
3096 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
3097 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
3098 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
3099 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
3100 		    gld->gld_sap);
3101 #endif
3102 	/*
3103 	 * Figure out how much of the packet header to throw away.
3104 	 *
3105 	 * Normal DLPI (non RAW/FAST) streams also want the
3106 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
3107 	 */
3108 	if (gld->gld_flags & GLD_RAW) {
3109 		/*
3110 		 * The packet will be tagged in the following cases:
3111 		 *   - if priority is not 0
3112 		 *   - a tagged packet sent on a physical link
3113 		 */
3114 		if ((pktinfo->isTagged && is_phys) || (pktinfo->user_pri != 0))
3115 			addtag = B_TRUE;
3116 		skiplen = 0;
3117 	} else {
3118 		/*
3119 		 * The packet will be tagged if it meets all below conditions:
3120 		 *   -  this is a physical stream
3121 		 *   -  this packet is tagged packet
3122 		 *   -  the stream is either a DL_PROMISC_SAP listener or a
3123 		 *	ETHERTYPE_VLAN listener
3124 		 */
3125 		if (is_phys && pktinfo->isTagged &&
3126 		    ((gld->gld_sap == ETHERTYPE_VLAN) ||
3127 		    (gld->gld_flags & GLD_PROM_SAP))) {
3128 			addtag = B_TRUE;
3129 		}
3130 
3131 		skiplen = pktinfo->macLen;		/* skip mac header */
3132 		if (gld->gld_ethertype)
3133 			skiplen += pktinfo->hdrLen;	/* skip any extra */
3134 	}
3135 	if (skiplen >= pktinfo->pktLen) {
3136 		/*
3137 		 * If the interpreter did its job right, then it cannot be
3138 		 * asking us to skip more bytes than are in the packet!
3139 		 * However, there could be zero data bytes left after the
3140 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
3141 		 * should contain at least one byte of data, so if we have
3142 		 * none we just drop it.
3143 		 */
3144 		ASSERT(!(skiplen > pktinfo->pktLen));
3145 		freemsg(mp);
3146 		return;
3147 	}
3148 
3149 	if (addtag) {
3150 		mblk_t *savemp = mp;
3151 
3152 		vtag = GLD_MAKE_VTAG(pktinfo->user_pri, pktinfo->cfi,
3153 		    is_phys ? pktinfo->vid : VLAN_VID_NONE);
3154 		if ((mp = gld_insert_vtag_ether(mp, vtag)) == NULL) {
3155 			freemsg(savemp);
3156 			return;
3157 		}
3158 	}
3159 
3160 	/*
3161 	 * Skip over the header(s), taking care to possibly handle message
3162 	 * fragments shorter than the amount we need to skip.  Hopefully
3163 	 * the driver will put the entire packet, or at least the entire
3164 	 * header, into a single message block.  But we handle it if not.
3165 	 */
3166 	while (skiplen >= MBLKL(mp)) {
3167 		mblk_t *savemp = mp;
3168 		skiplen -= MBLKL(mp);
3169 		mp = mp->b_cont;
3170 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
3171 		freeb(savemp);
3172 	}
3173 	mp->b_rptr += skiplen;
3174 
3175 	/* Add M_PROTO if necessary, and pass upstream */
3176 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
3177 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
3178 		/* RAW/FAST: just send up the M_DATA */
3179 		(*send)(gld->gld_qptr, mp);
3180 	} else {
3181 		/* everybody else wants to see a unitdata_ind structure */
3182 		mp = gld_addudind(gld, mp, pktinfo, addtag);
3183 		if (mp)
3184 			(*send)(gld->gld_qptr, mp);
3185 		/* if it failed, gld_addudind already bumped statistic */
3186 	}
3187 }
3188 
3189 /*
3190  * gld_addudind(gld, mp, pktinfo)
3191  * format a DL_UNITDATA_IND message to be sent upstream to the user
3192  */
3193 static mblk_t *
3194 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, boolean_t tagged)
3195 {
3196 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
3197 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
3198 	dl_unitdata_ind_t	*dludindp;
3199 	mblk_t			*nmp;
3200 	int			size;
3201 	int			type;
3202 
3203 #ifdef GLD_DEBUG
3204 	if (gld_debug & GLDTRACE)
3205 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
3206 		    (void *)mp, (void *)pktinfo);
3207 #endif
3208 	ASSERT(macinfo != NULL);
3209 
3210 	/*
3211 	 * Allocate the DL_UNITDATA_IND M_PROTO header, if allocation fails
3212 	 * might as well discard since we can't go further
3213 	 */
3214 	size = sizeof (dl_unitdata_ind_t) +
3215 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
3216 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
3217 		freemsg(mp);
3218 		BUMP(vlan->gldv_stats, NULL, glds_gldnorcvbuf, 1);
3219 #ifdef GLD_DEBUG
3220 		if (gld_debug & GLDERRS)
3221 			cmn_err(CE_WARN,
3222 			    "gld_addudind: allocb failed");
3223 #endif
3224 		return ((mblk_t *)NULL);
3225 	}
3226 	DB_TYPE(nmp) = M_PROTO;
3227 	nmp->b_rptr = nmp->b_datap->db_lim - size;
3228 
3229 	if (tagged)
3230 		type = ETHERTYPE_VLAN;
3231 	else
3232 		type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
3233 
3234 
3235 	/*
3236 	 * now setup the DL_UNITDATA_IND header
3237 	 *
3238 	 * XXX This looks broken if the saps aren't two bytes.
3239 	 */
3240 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
3241 	dludindp->dl_primitive = DL_UNITDATA_IND;
3242 	dludindp->dl_src_addr_length =
3243 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
3244 	    abs(macinfo->gldm_saplen);
3245 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
3246 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
3247 	    dludindp->dl_dest_addr_length;
3248 
3249 	dludindp->dl_group_address = (pktinfo->isMulticast ||
3250 	    pktinfo->isBroadcast);
3251 
3252 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
3253 
3254 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
3255 	nmp->b_wptr += macinfo->gldm_addrlen;
3256 
3257 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
3258 	*(ushort_t *)(nmp->b_wptr) = type;
3259 	nmp->b_wptr += abs(macinfo->gldm_saplen);
3260 
3261 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
3262 
3263 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
3264 	nmp->b_wptr += macinfo->gldm_addrlen;
3265 
3266 	*(ushort_t *)(nmp->b_wptr) = type;
3267 	nmp->b_wptr += abs(macinfo->gldm_saplen);
3268 
3269 	if (pktinfo->nosource)
3270 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
3271 	linkb(nmp, mp);
3272 	return (nmp);
3273 }
3274 
3275 /* ======================================================= */
3276 /* wsrv group: called from wsrv, single threaded per queue */
3277 /* ======================================================= */
3278 
3279 /*
3280  * We go to some trouble to avoid taking the same lock during normal
3281  * transmit processing as we do during normal receive processing.
3282  *
3283  * Elements of the per-instance macinfo and per-stream gld_t structures
3284  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3285  * (Elements of the gld_mac_pvt_t structure are considered part of the
3286  * macinfo structure for purposes of this discussion).
3287  *
3288  * However, it is more complicated than that:
3289  *
3290  *	Elements of the macinfo structure that are set before the macinfo
3291  *	structure is added to its device list by gld_register(), and never
3292  *	thereafter modified, are accessed without requiring taking the lock.
3293  *	A similar rule applies to those elements of the gld_t structure that
3294  *	are written by gld_open() before the stream is added to any list.
3295  *
3296  *	Most other elements of the macinfo structure may only be read or
3297  *	written while holding the maclock.
3298  *
3299  *	Most writable elements of the gld_t structure are written only
3300  *	within the single-threaded domain of wsrv() and subsidiaries.
3301  *	(This domain includes open/close while qprocs are not on.)
3302  *	The maclock need not be taken while within that domain
3303  *	simply to read those elements.  Writing to them, even within
3304  *	that domain, or reading from it outside that domain, requires
3305  *	holding the maclock.  Exception:  if the stream is not
3306  *	presently attached to a PPA, there is no associated macinfo,
3307  *	and no maclock need be taken.
3308  *
3309  *	The curr_macaddr element of the mac private structure is also
3310  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3311  *      of that structure. However, there are a few instances in the
3312  *      transmit path where we choose to forgo lock protection when
3313  *      reading this variable. This is to avoid lock contention between
3314  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3315  *      In doing so we will take a small risk or a few corrupted packets
3316  *      during the short an rare times when someone is changing the interface's
3317  *      physical address. We consider the small cost in this rare case to be
3318  *      worth the benefit of reduced lock contention under normal operating
3319  *      conditions. The risk/cost is small because:
3320  *          1. there is no guarantee at this layer of uncorrupted delivery.
3321  *          2. the physaddr doesn't change very often - no performance hit.
3322  *          3. if the physaddr changes, other stuff is going to be screwed
3323  *             up for a while anyway, while other sites refigure ARP, etc.,
3324  *             so losing a couple of packets is the least of our worries.
3325  *
3326  *	The list of streams associated with a macinfo is protected by
3327  *	two locks:  the per-macinfo maclock, and the per-major-device
3328  *	gld_devlock.  Both must be held to modify the list, but either
3329  *	may be held to protect the list during reading/traversing.  This
3330  *	allows independent locking for multiple instances in the receive
3331  *	path (using macinfo), while facilitating routines that must search
3332  *	the entire set of streams associated with a major device, such as
3333  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3334  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3335  *	protected, since they change at exactly the same time macinfo
3336  *	streams list does.
3337  *
3338  *	The list of macinfo structures associated with a major device
3339  *	structure is protected by the gld_devlock, as is the per-major
3340  *	list of Style 2 streams in the DL_UNATTACHED state.
3341  *
3342  *	The list of major devices is kept on a module-global list
3343  *	gld_device_list, which has its own lock to protect the list.
3344  *
3345  *	When it is necessary to hold more than one lock at a time, they
3346  *	are acquired in this "outside in" order:
3347  *		gld_device_list.gld_devlock
3348  *		glddev->gld_devlock
3349  *		GLDM_LOCK(macinfo)
3350  *
3351  *	Finally, there are some "volatile" elements of the gld_t structure
3352  *	used for synchronization between various routines that don't share
3353  *	the same mutexes.  See the routines for details.  These are:
3354  *		gld_xwait	between gld_wsrv() and gld_sched()
3355  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3356  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3357  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3358  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3359  *				(used in conjunction with q->q_first)
3360  */
3361 
3362 /*
3363  * gld_ioctl (q, mp)
3364  * handles all ioctl requests passed downstream. This routine is
3365  * passed a pointer to the message block with the ioctl request in it, and a
3366  * pointer to the queue so it can respond to the ioctl request with an ack.
3367  */
3368 int
3369 gld_ioctl(queue_t *q, mblk_t *mp)
3370 {
3371 	struct iocblk *iocp;
3372 	gld_t *gld;
3373 	gld_mac_info_t *macinfo;
3374 
3375 #ifdef GLD_DEBUG
3376 	if (gld_debug & GLDTRACE)
3377 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3378 #endif
3379 	gld = (gld_t *)q->q_ptr;
3380 	iocp = (struct iocblk *)mp->b_rptr;
3381 	switch (iocp->ioc_cmd) {
3382 	case DLIOCRAW:		/* raw M_DATA mode */
3383 		gld->gld_flags |= GLD_RAW;
3384 		DB_TYPE(mp) = M_IOCACK;
3385 		qreply(q, mp);
3386 		break;
3387 
3388 	case DL_IOC_HDR_INFO:	/* fastpath */
3389 		/*
3390 		 * DL_IOC_HDR_INFO should only come from IP. The one
3391 		 * initiated from user-land should not be allowed.
3392 		 */
3393 		if ((gld_global_options & GLD_OPT_NO_FASTPATH) ||
3394 		    (iocp->ioc_cr != kcred)) {
3395 			miocnak(q, mp, 0, EINVAL);
3396 			break;
3397 		}
3398 		gld_fastpath(gld, q, mp);
3399 		break;
3400 
3401 	case DLIOCMARGININFO: {	/* margin size */
3402 		int err;
3403 
3404 		if ((macinfo = gld->gld_mac_info) == NULL) {
3405 			miocnak(q, mp, 0, EINVAL);
3406 			break;
3407 		}
3408 
3409 		if ((err = miocpullup(mp, sizeof (uint32_t))) != 0) {
3410 			miocnak(q, mp, 0, err);
3411 			break;
3412 		}
3413 
3414 		*((uint32_t *)mp->b_cont->b_rptr) = macinfo->gldm_margin;
3415 		miocack(q, mp, sizeof (uint32_t), 0);
3416 		break;
3417 	}
3418 	default:
3419 		macinfo	 = gld->gld_mac_info;
3420 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3421 			miocnak(q, mp, 0, EINVAL);
3422 			break;
3423 		}
3424 
3425 		GLDM_LOCK(macinfo, RW_WRITER);
3426 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3427 		GLDM_UNLOCK(macinfo);
3428 		break;
3429 	}
3430 	return (0);
3431 }
3432 
3433 /*
3434  * Since the rules for "fastpath" mode don't seem to be documented
3435  * anywhere, I will describe GLD's rules for fastpath users here:
3436  *
3437  * Once in this mode you remain there until close.
3438  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3439  * You must be bound (DL_IDLE) to transmit.
3440  * There are other rules not listed above.
3441  */
3442 static void
3443 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3444 {
3445 	gld_interface_t *ifp;
3446 	gld_mac_info_t *macinfo;
3447 	dl_unitdata_req_t *dludp;
3448 	mblk_t *nmp;
3449 	t_scalar_t off, len;
3450 	uint_t maclen;
3451 	int error;
3452 
3453 	if (gld->gld_state != DL_IDLE) {
3454 		miocnak(q, mp, 0, EINVAL);
3455 		return;
3456 	}
3457 
3458 	macinfo = gld->gld_mac_info;
3459 	ASSERT(macinfo != NULL);
3460 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3461 
3462 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3463 	if (error != 0) {
3464 		miocnak(q, mp, 0, error);
3465 		return;
3466 	}
3467 
3468 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3469 	off = dludp->dl_dest_addr_offset;
3470 	len = dludp->dl_dest_addr_length;
3471 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3472 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3473 		miocnak(q, mp, 0, EINVAL);
3474 		return;
3475 	}
3476 
3477 	/*
3478 	 * We take his fastpath request as a declaration that he will accept
3479 	 * M_DATA messages from us, whether or not we are willing to accept
3480 	 * them from him.  This allows us to have fastpath in one direction
3481 	 * (flow upstream) even on media with Source Routing, where we are
3482 	 * unable to provide a fixed MAC header to be prepended to downstream
3483 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3484 	 * allow him to send M_DATA down to us.
3485 	 */
3486 	GLDM_LOCK(macinfo, RW_WRITER);
3487 	gld->gld_flags |= GLD_FAST;
3488 	GLDM_UNLOCK(macinfo);
3489 
3490 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3491 
3492 	/* This will fail for Source Routing media */
3493 	/* Also on Ethernet on 802.2 SAPs */
3494 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3495 		miocnak(q, mp, 0, ENOMEM);
3496 		return;
3497 	}
3498 
3499 	/*
3500 	 * Link new mblk in after the "request" mblks.
3501 	 */
3502 	linkb(mp, nmp);
3503 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3504 }
3505 
3506 /*
3507  * gld_cmds (q, mp)
3508  *	process the DL commands as defined in dlpi.h
3509  *	note that the primitives return status which is passed back
3510  *	to the service procedure.  If the value is GLDE_RETRY, then
3511  *	it is assumed that processing must stop and the primitive has
3512  *	been put back onto the queue.  If the value is any other error,
3513  *	then an error ack is generated by the service procedure.
3514  */
3515 static int
3516 gld_cmds(queue_t *q, mblk_t *mp)
3517 {
3518 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3519 	gld_t *gld = (gld_t *)(q->q_ptr);
3520 	int result = DL_BADPRIM;
3521 	int mblkl = MBLKL(mp);
3522 	t_uscalar_t dlreq;
3523 
3524 	/* Make sure we have at least dlp->dl_primitive */
3525 	if (mblkl < sizeof (dlp->dl_primitive))
3526 		return (DL_BADPRIM);
3527 
3528 	dlreq = dlp->dl_primitive;
3529 #ifdef	GLD_DEBUG
3530 	if (gld_debug & GLDTRACE)
3531 		cmn_err(CE_NOTE,
3532 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3533 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3534 #endif
3535 
3536 	switch (dlreq) {
3537 	case DL_UDQOS_REQ:
3538 		if (mblkl < DL_UDQOS_REQ_SIZE)
3539 			break;
3540 		result = gld_udqos(q, mp);
3541 		break;
3542 
3543 	case DL_BIND_REQ:
3544 		if (mblkl < DL_BIND_REQ_SIZE)
3545 			break;
3546 		result = gld_bind(q, mp);
3547 		break;
3548 
3549 	case DL_UNBIND_REQ:
3550 		if (mblkl < DL_UNBIND_REQ_SIZE)
3551 			break;
3552 		result = gld_unbind(q, mp);
3553 		break;
3554 
3555 	case DL_UNITDATA_REQ:
3556 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3557 			break;
3558 		result = gld_unitdata(q, mp);
3559 		break;
3560 
3561 	case DL_INFO_REQ:
3562 		if (mblkl < DL_INFO_REQ_SIZE)
3563 			break;
3564 		result = gld_inforeq(q, mp);
3565 		break;
3566 
3567 	case DL_ATTACH_REQ:
3568 		if (mblkl < DL_ATTACH_REQ_SIZE)
3569 			break;
3570 		if (gld->gld_style == DL_STYLE2)
3571 			result = gldattach(q, mp);
3572 		else
3573 			result = DL_NOTSUPPORTED;
3574 		break;
3575 
3576 	case DL_DETACH_REQ:
3577 		if (mblkl < DL_DETACH_REQ_SIZE)
3578 			break;
3579 		if (gld->gld_style == DL_STYLE2)
3580 			result = gldunattach(q, mp);
3581 		else
3582 			result = DL_NOTSUPPORTED;
3583 		break;
3584 
3585 	case DL_ENABMULTI_REQ:
3586 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3587 			break;
3588 		result = gld_enable_multi(q, mp);
3589 		break;
3590 
3591 	case DL_DISABMULTI_REQ:
3592 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3593 			break;
3594 		result = gld_disable_multi(q, mp);
3595 		break;
3596 
3597 	case DL_PHYS_ADDR_REQ:
3598 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3599 			break;
3600 		result = gld_physaddr(q, mp);
3601 		break;
3602 
3603 	case DL_SET_PHYS_ADDR_REQ:
3604 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3605 			break;
3606 		result = gld_setaddr(q, mp);
3607 		break;
3608 
3609 	case DL_PROMISCON_REQ:
3610 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3611 			break;
3612 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3613 		break;
3614 
3615 	case DL_PROMISCOFF_REQ:
3616 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3617 			break;
3618 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3619 		break;
3620 
3621 	case DL_GET_STATISTICS_REQ:
3622 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3623 			break;
3624 		result = gld_get_statistics(q, mp);
3625 		break;
3626 
3627 	case DL_CAPABILITY_REQ:
3628 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3629 			break;
3630 		result = gld_cap(q, mp);
3631 		break;
3632 
3633 	case DL_NOTIFY_REQ:
3634 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3635 			break;
3636 		result = gld_notify_req(q, mp);
3637 		break;
3638 
3639 	case DL_XID_REQ:
3640 	case DL_XID_RES:
3641 	case DL_TEST_REQ:
3642 	case DL_TEST_RES:
3643 	case DL_CONTROL_REQ:
3644 	case DL_PASSIVE_REQ:
3645 		result = DL_NOTSUPPORTED;
3646 		break;
3647 
3648 	default:
3649 #ifdef	GLD_DEBUG
3650 		if (gld_debug & GLDERRS)
3651 			cmn_err(CE_WARN,
3652 			    "gld_cmds: unknown M_PROTO message: %d",
3653 			    dlreq);
3654 #endif
3655 		result = DL_BADPRIM;
3656 	}
3657 
3658 	return (result);
3659 }
3660 
3661 static int
3662 gld_cap(queue_t *q, mblk_t *mp)
3663 {
3664 	gld_t *gld = (gld_t *)q->q_ptr;
3665 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3666 
3667 	if (gld->gld_state == DL_UNATTACHED)
3668 		return (DL_OUTSTATE);
3669 
3670 	if (dlp->dl_sub_length == 0)
3671 		return (gld_cap_ack(q, mp));
3672 
3673 	return (gld_cap_enable(q, mp));
3674 }
3675 
3676 static int
3677 gld_cap_ack(queue_t *q, mblk_t *mp)
3678 {
3679 	gld_t *gld = (gld_t *)q->q_ptr;
3680 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3681 	gld_interface_t *ifp;
3682 	dl_capability_ack_t *dlap;
3683 	dl_capability_sub_t *dlsp;
3684 	size_t size = sizeof (dl_capability_ack_t);
3685 	size_t subsize = 0;
3686 
3687 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3688 
3689 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3690 		subsize += sizeof (dl_capability_sub_t) +
3691 		    sizeof (dl_capab_hcksum_t);
3692 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3693 		subsize += sizeof (dl_capability_sub_t) +
3694 		    sizeof (dl_capab_zerocopy_t);
3695 	if (macinfo->gldm_options & GLDOPT_MDT)
3696 		subsize += (sizeof (dl_capability_sub_t) +
3697 		    sizeof (dl_capab_mdt_t));
3698 
3699 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3700 	    DL_CAPABILITY_ACK)) == NULL)
3701 		return (GLDE_OK);
3702 
3703 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3704 	dlap->dl_sub_offset = 0;
3705 	if ((dlap->dl_sub_length = subsize) != 0)
3706 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3707 	dlsp = (dl_capability_sub_t *)&dlap[1];
3708 
3709 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3710 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3711 
3712 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3713 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3714 
3715 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3716 
3717 		dlhp->hcksum_txflags = 0;
3718 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3719 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3720 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3721 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3722 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3723 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3724 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3725 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3726 
3727 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3728 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3729 	}
3730 
3731 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3732 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3733 
3734 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3735 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3736 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3737 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3738 
3739 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3740 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3741 	}
3742 
3743 	if (macinfo->gldm_options & GLDOPT_MDT) {
3744 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3745 
3746 		dlsp->dl_cap = DL_CAPAB_MDT;
3747 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3748 
3749 		dlmp->mdt_version = MDT_VERSION_2;
3750 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3751 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3752 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3753 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3754 		dlmp->mdt_hdr_head = ifp->hdr_size;
3755 		dlmp->mdt_hdr_tail = 0;
3756 	}
3757 
3758 	qreply(q, mp);
3759 	return (GLDE_OK);
3760 }
3761 
3762 static int
3763 gld_cap_enable(queue_t *q, mblk_t *mp)
3764 {
3765 	dl_capability_req_t *dlp;
3766 	dl_capability_sub_t *dlsp;
3767 	dl_capab_hcksum_t *dlhp;
3768 	offset_t off;
3769 	size_t len;
3770 	size_t size;
3771 	offset_t end;
3772 
3773 	dlp = (dl_capability_req_t *)mp->b_rptr;
3774 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3775 
3776 	off = dlp->dl_sub_offset;
3777 	len = dlp->dl_sub_length;
3778 
3779 	if (!MBLKIN(mp, off, len))
3780 		return (DL_BADPRIM);
3781 
3782 	end = off + len;
3783 	while (off < end) {
3784 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3785 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3786 		if (off + size > end)
3787 			return (DL_BADPRIM);
3788 
3789 		switch (dlsp->dl_cap) {
3790 		case DL_CAPAB_HCKSUM:
3791 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3792 			/* nothing useful we can do with the contents */
3793 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3794 			break;
3795 		default:
3796 			break;
3797 		}
3798 
3799 		off += size;
3800 	}
3801 
3802 	qreply(q, mp);
3803 	return (GLDE_OK);
3804 }
3805 
3806 /*
3807  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3808  * requested the specific <notification> that the message carries AND is
3809  * eligible and ready to receive the notification immediately.
3810  *
3811  * This routine ignores flow control. Notifications will be sent regardless.
3812  *
3813  * In all cases, the original message passed in is freed at the end of
3814  * the routine.
3815  */
3816 static void
3817 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3818 {
3819 	gld_mac_pvt_t *mac_pvt;
3820 	gld_vlan_t *vlan;
3821 	gld_t *gld;
3822 	mblk_t *nmp;
3823 	int i;
3824 
3825 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3826 
3827 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3828 
3829 	/*
3830 	 * Search all the streams attached to this macinfo looking
3831 	 * for those eligible to receive the present notification.
3832 	 */
3833 	for (i = 0; i < VLAN_HASHSZ; i++) {
3834 		for (vlan = mac_pvt->vlan_hash[i];
3835 		    vlan != NULL; vlan = vlan->gldv_next) {
3836 			for (gld = vlan->gldv_str_next;
3837 			    gld != (gld_t *)&vlan->gldv_str_next;
3838 			    gld = gld->gld_next) {
3839 				ASSERT(gld->gld_qptr != NULL);
3840 				ASSERT(gld->gld_state == DL_IDLE ||
3841 				    gld->gld_state == DL_UNBOUND);
3842 				ASSERT(gld->gld_mac_info == macinfo);
3843 
3844 				if (gld->gld_flags & GLD_STR_CLOSING)
3845 					continue; /* not eligible - skip */
3846 				if (!(notification & gld->gld_notifications))
3847 					continue; /* not wanted - skip */
3848 				if ((nmp = dupmsg(mp)) == NULL)
3849 					continue; /* can't copy - skip */
3850 
3851 				/*
3852 				 * All OK; send dup'd notification up this
3853 				 * stream
3854 				 */
3855 				qreply(WR(gld->gld_qptr), nmp);
3856 			}
3857 		}
3858 	}
3859 
3860 	/*
3861 	 * Drop the original message block now
3862 	 */
3863 	freemsg(mp);
3864 }
3865 
3866 /*
3867  * For each (understood) bit in the <notifications> argument, contruct
3868  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3869  * eligible queues if <q> is NULL.
3870  */
3871 static void
3872 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3873 {
3874 	gld_mac_pvt_t *mac_pvt;
3875 	dl_notify_ind_t *dlnip;
3876 	struct gld_stats *stats;
3877 	mblk_t *mp;
3878 	size_t size;
3879 	uint32_t bit;
3880 
3881 	GLDM_LOCK(macinfo, RW_WRITER);
3882 
3883 	/*
3884 	 * The following cases shouldn't happen, but just in case the
3885 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3886 	 * check anyway ...
3887 	 */
3888 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3889 		GLDM_UNLOCK(macinfo);
3890 		return;				/* not ready yet	*/
3891 	}
3892 
3893 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3894 		GLDM_UNLOCK(macinfo);
3895 		return;				/* not ready anymore	*/
3896 	}
3897 
3898 	/*
3899 	 * Make sure the kstats are up to date, 'cos we use some of
3900 	 * the kstat values below, specifically the link speed ...
3901 	 */
3902 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3903 	stats = mac_pvt->statistics;
3904 	if (macinfo->gldm_get_stats)
3905 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3906 
3907 	for (bit = 1; notifications != 0; bit <<= 1) {
3908 		if ((notifications & bit) == 0)
3909 			continue;
3910 		notifications &= ~bit;
3911 
3912 		size = DL_NOTIFY_IND_SIZE;
3913 		if (bit == DL_NOTE_PHYS_ADDR)
3914 			size += macinfo->gldm_addrlen;
3915 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3916 			continue;
3917 
3918 		mp->b_datap->db_type = M_PROTO;
3919 		mp->b_wptr = mp->b_rptr + size;
3920 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3921 		dlnip->dl_primitive = DL_NOTIFY_IND;
3922 		dlnip->dl_notification = 0;
3923 		dlnip->dl_data = 0;
3924 		dlnip->dl_addr_length = 0;
3925 		dlnip->dl_addr_offset = 0;
3926 
3927 		switch (bit) {
3928 		case DL_NOTE_PROMISC_ON_PHYS:
3929 		case DL_NOTE_PROMISC_OFF_PHYS:
3930 			if (mac_pvt->nprom != 0)
3931 				dlnip->dl_notification = bit;
3932 			break;
3933 
3934 		case DL_NOTE_LINK_DOWN:
3935 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3936 				dlnip->dl_notification = bit;
3937 			break;
3938 
3939 		case DL_NOTE_LINK_UP:
3940 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3941 				dlnip->dl_notification = bit;
3942 			break;
3943 
3944 		case DL_NOTE_SPEED:
3945 			/*
3946 			 * Conversion required here:
3947 			 *	GLD keeps the speed in bit/s in a uint64
3948 			 *	DLPI wants it in kb/s in a uint32
3949 			 * Fortunately this is still big enough for 10Gb/s!
3950 			 */
3951 			dlnip->dl_notification = bit;
3952 			dlnip->dl_data = stats->glds_speed/1000ULL;
3953 			break;
3954 
3955 		case DL_NOTE_PHYS_ADDR:
3956 			dlnip->dl_notification = bit;
3957 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3958 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3959 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3960 			    abs(macinfo->gldm_saplen);
3961 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3962 			mac_copy(mac_pvt->curr_macaddr,
3963 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3964 			    macinfo->gldm_addrlen);
3965 			break;
3966 
3967 		default:
3968 			break;
3969 		}
3970 
3971 		if (dlnip->dl_notification == 0)
3972 			freemsg(mp);
3973 		else if (q != NULL)
3974 			qreply(q, mp);
3975 		else
3976 			gld_notify_qs(macinfo, mp, bit);
3977 	}
3978 
3979 	GLDM_UNLOCK(macinfo);
3980 }
3981 
3982 /*
3983  * gld_notify_req - handle a DL_NOTIFY_REQ message
3984  */
3985 static int
3986 gld_notify_req(queue_t *q, mblk_t *mp)
3987 {
3988 	gld_t *gld = (gld_t *)q->q_ptr;
3989 	gld_mac_info_t *macinfo;
3990 	gld_mac_pvt_t *pvt;
3991 	dl_notify_req_t *dlnrp;
3992 	dl_notify_ack_t *dlnap;
3993 
3994 	ASSERT(gld != NULL);
3995 	ASSERT(gld->gld_qptr == RD(q));
3996 
3997 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3998 
3999 #ifdef GLD_DEBUG
4000 	if (gld_debug & GLDTRACE)
4001 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
4002 		    (void *)q, (void *)mp);
4003 #endif
4004 
4005 	if (gld->gld_state == DL_UNATTACHED) {
4006 #ifdef GLD_DEBUG
4007 		if (gld_debug & GLDERRS)
4008 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
4009 			    gld->gld_state);
4010 #endif
4011 		return (DL_OUTSTATE);
4012 	}
4013 
4014 	/*
4015 	 * Remember what notifications are required by this stream
4016 	 */
4017 	macinfo = gld->gld_mac_info;
4018 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4019 
4020 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
4021 
4022 	/*
4023 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
4024 	 * that this driver can provide, independently of which ones have
4025 	 * previously been or are now being requested.
4026 	 */
4027 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
4028 	    DL_NOTIFY_ACK)) == NULL)
4029 		return (DL_SYSERR);
4030 
4031 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
4032 	dlnap->dl_notifications = pvt->notifications;
4033 	qreply(q, mp);
4034 
4035 	/*
4036 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
4037 	 * reply, the the requestor gets zero or more DL_NOTIFY_IND messages
4038 	 * that provide the current status.
4039 	 */
4040 	gld_notify_ind(macinfo, gld->gld_notifications, q);
4041 
4042 	return (GLDE_OK);
4043 }
4044 
4045 /*
4046  * gld_linkstate()
4047  *	Called by driver to tell GLD the state of the physical link.
4048  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
4049  *	notification to each client that has previously requested such
4050  *	notifications
4051  */
4052 void
4053 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
4054 {
4055 	uint32_t notification;
4056 
4057 	switch (newstate) {
4058 	default:
4059 		return;
4060 
4061 	case GLD_LINKSTATE_DOWN:
4062 		notification = DL_NOTE_LINK_DOWN;
4063 		break;
4064 
4065 	case GLD_LINKSTATE_UP:
4066 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
4067 		break;
4068 
4069 	case GLD_LINKSTATE_UNKNOWN:
4070 		notification = 0;
4071 		break;
4072 	}
4073 
4074 	GLDM_LOCK(macinfo, RW_WRITER);
4075 	if (macinfo->gldm_linkstate == newstate)
4076 		notification = 0;
4077 	else
4078 		macinfo->gldm_linkstate = newstate;
4079 	GLDM_UNLOCK(macinfo);
4080 
4081 	if (notification)
4082 		gld_notify_ind(macinfo, notification, NULL);
4083 }
4084 
4085 /*
4086  * gld_udqos - set the current QoS parameters (priority only at the moment).
4087  */
4088 static int
4089 gld_udqos(queue_t *q, mblk_t *mp)
4090 {
4091 	dl_udqos_req_t *dlp;
4092 	gld_t  *gld = (gld_t *)q->q_ptr;
4093 	int off;
4094 	int len;
4095 	dl_qos_cl_sel1_t *selp;
4096 
4097 	ASSERT(gld);
4098 	ASSERT(gld->gld_qptr == RD(q));
4099 
4100 #ifdef GLD_DEBUG
4101 	if (gld_debug & GLDTRACE)
4102 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
4103 #endif
4104 
4105 	if (gld->gld_state != DL_IDLE) {
4106 #ifdef GLD_DEBUG
4107 		if (gld_debug & GLDERRS)
4108 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
4109 			    gld->gld_state);
4110 #endif
4111 		return (DL_OUTSTATE);
4112 	}
4113 
4114 	dlp = (dl_udqos_req_t *)mp->b_rptr;
4115 	off = dlp->dl_qos_offset;
4116 	len = dlp->dl_qos_length;
4117 
4118 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
4119 		return (DL_BADQOSTYPE);
4120 
4121 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
4122 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
4123 		return (DL_BADQOSTYPE);
4124 
4125 	if (selp->dl_trans_delay != 0 &&
4126 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
4127 		return (DL_BADQOSPARAM);
4128 	if (selp->dl_protection != 0 &&
4129 	    selp->dl_protection != DL_QOS_DONT_CARE)
4130 		return (DL_BADQOSPARAM);
4131 	if (selp->dl_residual_error != 0 &&
4132 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
4133 		return (DL_BADQOSPARAM);
4134 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
4135 		return (DL_BADQOSPARAM);
4136 
4137 	gld->gld_upri = selp->dl_priority;
4138 
4139 	dlokack(q, mp, DL_UDQOS_REQ);
4140 	return (GLDE_OK);
4141 }
4142 
4143 static mblk_t *
4144 gld_bindack(queue_t *q, mblk_t *mp)
4145 {
4146 	gld_t *gld = (gld_t *)q->q_ptr;
4147 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4148 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4149 	dl_bind_ack_t *dlp;
4150 	size_t size;
4151 	t_uscalar_t addrlen;
4152 	uchar_t *sapp;
4153 
4154 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
4155 	size = sizeof (dl_bind_ack_t) + addrlen;
4156 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
4157 		return (NULL);
4158 
4159 	dlp = (dl_bind_ack_t *)mp->b_rptr;
4160 	dlp->dl_sap = gld->gld_sap;
4161 	dlp->dl_addr_length = addrlen;
4162 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
4163 	dlp->dl_max_conind = 0;
4164 	dlp->dl_xidtest_flg = 0;
4165 
4166 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
4167 	    macinfo->gldm_addrlen);
4168 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
4169 	*(ushort_t *)sapp = gld->gld_sap;
4170 
4171 	return (mp);
4172 }
4173 
4174 /*
4175  * gld_bind - determine if a SAP is already allocated and whether it is legal
4176  * to do the bind at this time
4177  */
4178 static int
4179 gld_bind(queue_t *q, mblk_t *mp)
4180 {
4181 	ulong_t	sap;
4182 	dl_bind_req_t *dlp;
4183 	gld_t *gld = (gld_t *)q->q_ptr;
4184 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4185 
4186 	ASSERT(gld);
4187 	ASSERT(gld->gld_qptr == RD(q));
4188 
4189 #ifdef GLD_DEBUG
4190 	if (gld_debug & GLDTRACE)
4191 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
4192 #endif
4193 
4194 	dlp = (dl_bind_req_t *)mp->b_rptr;
4195 	sap = dlp->dl_sap;
4196 
4197 #ifdef GLD_DEBUG
4198 	if (gld_debug & GLDPROT)
4199 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
4200 #endif
4201 
4202 	if (gld->gld_state != DL_UNBOUND) {
4203 #ifdef GLD_DEBUG
4204 		if (gld_debug & GLDERRS)
4205 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
4206 			    gld->gld_state);
4207 #endif
4208 		return (DL_OUTSTATE);
4209 	}
4210 	ASSERT(macinfo);
4211 
4212 	if (dlp->dl_service_mode != DL_CLDLS) {
4213 		return (DL_UNSUPPORTED);
4214 	}
4215 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
4216 		return (DL_NOAUTO);
4217 	}
4218 
4219 	/*
4220 	 * Check sap validity and decide whether this stream accepts
4221 	 * IEEE 802.2 (LLC) packets.
4222 	 */
4223 	if (sap > ETHERTYPE_MAX)
4224 		return (DL_BADSAP);
4225 
4226 	/*
4227 	 * Decide whether the SAP value selects EtherType encoding/decoding.
4228 	 * For compatibility with monolithic ethernet drivers, the range of
4229 	 * SAP values is different for DL_ETHER media.
4230 	 */
4231 	switch (macinfo->gldm_type) {
4232 	case DL_ETHER:
4233 		gld->gld_ethertype = (sap > ETHERMTU);
4234 		break;
4235 	default:
4236 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
4237 		break;
4238 	}
4239 
4240 	/* if we get to here, then the SAP is legal enough */
4241 	GLDM_LOCK(macinfo, RW_WRITER);
4242 	gld->gld_state = DL_IDLE;	/* bound and ready */
4243 	gld->gld_sap = sap;
4244 	if ((macinfo->gldm_type == DL_ETHER) && (sap == ETHERTYPE_VLAN))
4245 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap++;
4246 	gld_set_ipq(gld);
4247 
4248 #ifdef GLD_DEBUG
4249 	if (gld_debug & GLDPROT)
4250 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
4251 #endif
4252 
4253 	/* ACK the BIND */
4254 	mp = gld_bindack(q, mp);
4255 	GLDM_UNLOCK(macinfo);
4256 
4257 	if (mp != NULL) {
4258 		qreply(q, mp);
4259 		return (GLDE_OK);
4260 	}
4261 
4262 	return (DL_SYSERR);
4263 }
4264 
4265 /*
4266  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
4267  * The stream is still open and can be re-bound.
4268  */
4269 static int
4270 gld_unbind(queue_t *q, mblk_t *mp)
4271 {
4272 	gld_t *gld = (gld_t *)q->q_ptr;
4273 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4274 
4275 	ASSERT(gld);
4276 
4277 #ifdef GLD_DEBUG
4278 	if (gld_debug & GLDTRACE)
4279 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
4280 #endif
4281 
4282 	if (gld->gld_state != DL_IDLE) {
4283 #ifdef GLD_DEBUG
4284 		if (gld_debug & GLDERRS)
4285 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
4286 			    gld->gld_state);
4287 #endif
4288 		return (DL_OUTSTATE);
4289 	}
4290 	ASSERT(macinfo);
4291 
4292 	/*
4293 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
4294 	 * See comments above gld_start().
4295 	 */
4296 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
4297 	membar_enter();
4298 	if (gld->gld_wput_count != 0) {
4299 		gld->gld_in_unbind = B_FALSE;
4300 		ASSERT(mp);		/* we didn't come from close */
4301 #ifdef GLD_DEBUG
4302 		if (gld_debug & GLDETRACE)
4303 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
4304 #endif
4305 		(void) putbq(q, mp);
4306 		qenable(q);		/* try again soon */
4307 		return (GLDE_RETRY);
4308 	}
4309 
4310 	GLDM_LOCK(macinfo, RW_WRITER);
4311 	if ((macinfo->gldm_type == DL_ETHER) &&
4312 	    (gld->gld_sap == ETHERTYPE_VLAN)) {
4313 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap--;
4314 	}
4315 	gld->gld_state = DL_UNBOUND;
4316 	gld->gld_sap = 0;
4317 	gld_set_ipq(gld);
4318 	GLDM_UNLOCK(macinfo);
4319 
4320 	membar_exit();
4321 	gld->gld_in_unbind = B_FALSE;
4322 
4323 	/* mp is NULL if we came from close */
4324 	if (mp) {
4325 		gld_flushqueue(q);	/* flush the queues */
4326 		dlokack(q, mp, DL_UNBIND_REQ);
4327 	}
4328 	return (GLDE_OK);
4329 }
4330 
4331 /*
4332  * gld_inforeq - generate the response to an info request
4333  */
4334 static int
4335 gld_inforeq(queue_t *q, mblk_t *mp)
4336 {
4337 	gld_t		*gld;
4338 	dl_info_ack_t	*dlp;
4339 	int		bufsize;
4340 	glddev_t	*glddev;
4341 	gld_mac_info_t	*macinfo;
4342 	gld_mac_pvt_t	*mac_pvt;
4343 	int		sel_offset = 0;
4344 	int		range_offset = 0;
4345 	int		addr_offset;
4346 	int		addr_length;
4347 	int		sap_length;
4348 	int		brdcst_offset;
4349 	int		brdcst_length;
4350 	uchar_t		*sapp;
4351 
4352 #ifdef GLD_DEBUG
4353 	if (gld_debug & GLDTRACE)
4354 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4355 #endif
4356 	gld = (gld_t *)q->q_ptr;
4357 	ASSERT(gld);
4358 	glddev = gld->gld_device;
4359 	ASSERT(glddev);
4360 
4361 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4362 		macinfo = gld->gld_mac_info;
4363 		ASSERT(macinfo != NULL);
4364 
4365 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4366 
4367 		addr_length = macinfo->gldm_addrlen;
4368 		sap_length = macinfo->gldm_saplen;
4369 		brdcst_length = macinfo->gldm_addrlen;
4370 	} else {
4371 		addr_length = glddev->gld_addrlen;
4372 		sap_length = glddev->gld_saplen;
4373 		brdcst_length = glddev->gld_addrlen;
4374 	}
4375 
4376 	bufsize = sizeof (dl_info_ack_t);
4377 
4378 	addr_offset = bufsize;
4379 	bufsize += addr_length;
4380 	bufsize += abs(sap_length);
4381 
4382 	brdcst_offset = bufsize;
4383 	bufsize += brdcst_length;
4384 
4385 	if (((gld_vlan_t *)gld->gld_vlan) != NULL) {
4386 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4387 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4388 
4389 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4390 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4391 	}
4392 
4393 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4394 		return (GLDE_OK);	/* nothing more to be done */
4395 
4396 	bzero(mp->b_rptr, bufsize);
4397 
4398 	dlp = (dl_info_ack_t *)mp->b_rptr;
4399 	dlp->dl_primitive = DL_INFO_ACK;
4400 	dlp->dl_version = DL_VERSION_2;
4401 	dlp->dl_service_mode = DL_CLDLS;
4402 	dlp->dl_current_state = gld->gld_state;
4403 	dlp->dl_provider_style = gld->gld_style;
4404 
4405 	if (sel_offset != 0) {
4406 		dl_qos_cl_sel1_t	*selp;
4407 		dl_qos_cl_range1_t	*rangep;
4408 
4409 		ASSERT(range_offset != 0);
4410 
4411 		dlp->dl_qos_offset = sel_offset;
4412 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4413 		dlp->dl_qos_range_offset = range_offset;
4414 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4415 
4416 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4417 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4418 		selp->dl_priority = gld->gld_upri;
4419 
4420 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4421 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4422 		rangep->dl_priority.dl_min = 0;
4423 		rangep->dl_priority.dl_max = 7;
4424 	}
4425 
4426 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4427 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4428 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4429 		dlp->dl_mac_type = macinfo->gldm_type;
4430 		dlp->dl_addr_length = addr_length + abs(sap_length);
4431 		dlp->dl_sap_length = sap_length;
4432 
4433 		if (gld->gld_state == DL_IDLE) {
4434 			/*
4435 			 * If we are bound to a non-LLC SAP on any medium
4436 			 * other than Ethernet, then we need room for a
4437 			 * SNAP header.  So we have to adjust the MTU size
4438 			 * accordingly.  XXX I suppose this should be done
4439 			 * in gldutil.c, but it seems likely that this will
4440 			 * always be true for everything GLD supports but
4441 			 * Ethernet.  Check this if you add another medium.
4442 			 */
4443 			if ((macinfo->gldm_type == DL_TPR ||
4444 			    macinfo->gldm_type == DL_FDDI) &&
4445 			    gld->gld_ethertype)
4446 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4447 
4448 			/* copy macaddr and sap */
4449 			dlp->dl_addr_offset = addr_offset;
4450 
4451 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4452 			    addr_offset, macinfo->gldm_addrlen);
4453 			sapp = mp->b_rptr + addr_offset +
4454 			    macinfo->gldm_addrlen;
4455 			*(ushort_t *)sapp = gld->gld_sap;
4456 		} else {
4457 			dlp->dl_addr_offset = 0;
4458 		}
4459 
4460 		/* copy broadcast addr */
4461 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4462 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4463 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4464 		    mp->b_rptr + brdcst_offset, brdcst_length);
4465 	} else {
4466 		/*
4467 		 * No PPA is attached.
4468 		 * The best we can do is use the values provided
4469 		 * by the first mac that called gld_register.
4470 		 */
4471 		dlp->dl_min_sdu = glddev->gld_minsdu;
4472 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4473 		dlp->dl_mac_type = glddev->gld_type;
4474 		dlp->dl_addr_length = addr_length + abs(sap_length);
4475 		dlp->dl_sap_length = sap_length;
4476 		dlp->dl_addr_offset = 0;
4477 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4478 		dlp->dl_brdcst_addr_length = brdcst_length;
4479 		mac_copy((caddr_t)glddev->gld_broadcast,
4480 		    mp->b_rptr + brdcst_offset, brdcst_length);
4481 	}
4482 	qreply(q, mp);
4483 	return (GLDE_OK);
4484 }
4485 
4486 /*
4487  * gld_unitdata (q, mp)
4488  * send a datagram.  Destination address/lsap is in M_PROTO
4489  * message (first mblock), data is in remainder of message.
4490  *
4491  */
4492 static int
4493 gld_unitdata(queue_t *q, mblk_t *mp)
4494 {
4495 	gld_t *gld = (gld_t *)q->q_ptr;
4496 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4497 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4498 	size_t	msglen;
4499 	mblk_t	*nmp;
4500 	gld_interface_t *ifp;
4501 	uint32_t start;
4502 	uint32_t stuff;
4503 	uint32_t end;
4504 	uint32_t value;
4505 	uint32_t flags;
4506 	uint32_t upri;
4507 
4508 #ifdef GLD_DEBUG
4509 	if (gld_debug & GLDTRACE)
4510 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4511 #endif
4512 
4513 	if (gld->gld_state != DL_IDLE) {
4514 #ifdef GLD_DEBUG
4515 		if (gld_debug & GLDERRS)
4516 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4517 			    gld->gld_state);
4518 #endif
4519 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4520 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4521 		return (GLDE_OK);
4522 	}
4523 	ASSERT(macinfo != NULL);
4524 
4525 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4526 	    dlp->dl_dest_addr_length !=
4527 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4528 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4529 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4530 		return (GLDE_OK);
4531 	}
4532 
4533 	upri = dlp->dl_priority.dl_max;
4534 
4535 	msglen = msgdsize(mp);
4536 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4537 #ifdef GLD_DEBUG
4538 		if (gld_debug & GLDERRS)
4539 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4540 			    (int)msglen);
4541 #endif
4542 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4543 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4544 		return (GLDE_OK);
4545 	}
4546 
4547 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4548 
4549 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4550 
4551 	/* grab any checksum information that may be present */
4552 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4553 	    &value, &flags);
4554 
4555 	/*
4556 	 * Prepend a valid header for transmission
4557 	 */
4558 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4559 #ifdef GLD_DEBUG
4560 		if (gld_debug & GLDERRS)
4561 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4562 #endif
4563 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4564 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4565 		return (GLDE_OK);
4566 	}
4567 
4568 	/* apply any checksum information to the first block in the chain */
4569 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4570 	    flags, 0);
4571 
4572 	GLD_CLEAR_MBLK_VTAG(nmp);
4573 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4574 		qenable(q);
4575 		return (GLDE_RETRY);
4576 	}
4577 
4578 	return (GLDE_OK);
4579 }
4580 
4581 /*
4582  * gldattach(q, mp)
4583  * DLPI DL_ATTACH_REQ
4584  * this attaches the stream to a PPA
4585  */
4586 static int
4587 gldattach(queue_t *q, mblk_t *mp)
4588 {
4589 	dl_attach_req_t *at;
4590 	gld_mac_info_t *macinfo;
4591 	gld_t  *gld = (gld_t *)q->q_ptr;
4592 	glddev_t *glddev;
4593 	gld_mac_pvt_t *mac_pvt;
4594 	uint32_t ppa;
4595 	uint32_t vid;
4596 	gld_vlan_t *vlan;
4597 
4598 	at = (dl_attach_req_t *)mp->b_rptr;
4599 
4600 	if (gld->gld_state != DL_UNATTACHED)
4601 		return (DL_OUTSTATE);
4602 
4603 	ASSERT(!gld->gld_mac_info);
4604 
4605 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4606 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4607 	if (vid > VLAN_VID_MAX)
4608 		return (DL_BADPPA);
4609 
4610 	glddev = gld->gld_device;
4611 	mutex_enter(&glddev->gld_devlock);
4612 	for (macinfo = glddev->gld_mac_next;
4613 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4614 	    macinfo = macinfo->gldm_next) {
4615 		int inst;
4616 
4617 		ASSERT(macinfo != NULL);
4618 		if (macinfo->gldm_ppa != ppa)
4619 			continue;
4620 
4621 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4622 			continue;	/* this one's not ready yet */
4623 
4624 		/*
4625 		 * VLAN sanity check
4626 		 */
4627 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4628 			mutex_exit(&glddev->gld_devlock);
4629 			return (DL_BADPPA);
4630 		}
4631 
4632 		/*
4633 		 * We found the correct PPA, hold the instance
4634 		 */
4635 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4636 		if (inst == -1 || qassociate(q, inst) != 0) {
4637 			mutex_exit(&glddev->gld_devlock);
4638 			return (DL_BADPPA);
4639 		}
4640 
4641 		/* Take the stream off the per-driver-class list */
4642 		gldremque(gld);
4643 
4644 		/*
4645 		 * We must hold the lock to prevent multiple calls
4646 		 * to the reset and start routines.
4647 		 */
4648 		GLDM_LOCK(macinfo, RW_WRITER);
4649 
4650 		gld->gld_mac_info = macinfo;
4651 
4652 		if (macinfo->gldm_send_tagged != NULL)
4653 			gld->gld_send = macinfo->gldm_send_tagged;
4654 		else
4655 			gld->gld_send = macinfo->gldm_send;
4656 
4657 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4658 			GLDM_UNLOCK(macinfo);
4659 			gldinsque(gld, glddev->gld_str_prev);
4660 			mutex_exit(&glddev->gld_devlock);
4661 			(void) qassociate(q, -1);
4662 			return (DL_BADPPA);
4663 		}
4664 
4665 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4666 		if (!mac_pvt->started) {
4667 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4668 				gld_rem_vlan(vlan);
4669 				GLDM_UNLOCK(macinfo);
4670 				gldinsque(gld, glddev->gld_str_prev);
4671 				mutex_exit(&glddev->gld_devlock);
4672 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4673 				    EIO);
4674 				(void) qassociate(q, -1);
4675 				return (GLDE_OK);
4676 			}
4677 		}
4678 
4679 		gld->gld_vlan = vlan;
4680 		vlan->gldv_nstreams++;
4681 		gldinsque(gld, vlan->gldv_str_prev);
4682 		gld->gld_state = DL_UNBOUND;
4683 		GLDM_UNLOCK(macinfo);
4684 
4685 #ifdef GLD_DEBUG
4686 		if (gld_debug & GLDPROT) {
4687 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4688 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4689 		}
4690 #endif
4691 		mutex_exit(&glddev->gld_devlock);
4692 		dlokack(q, mp, DL_ATTACH_REQ);
4693 		return (GLDE_OK);
4694 	}
4695 	mutex_exit(&glddev->gld_devlock);
4696 	return (DL_BADPPA);
4697 }
4698 
4699 /*
4700  * gldunattach(q, mp)
4701  * DLPI DL_DETACH_REQ
4702  * detaches the mac layer from the stream
4703  */
4704 int
4705 gldunattach(queue_t *q, mblk_t *mp)
4706 {
4707 	gld_t  *gld = (gld_t *)q->q_ptr;
4708 	glddev_t *glddev = gld->gld_device;
4709 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4710 	int	state = gld->gld_state;
4711 	int	i;
4712 	gld_mac_pvt_t *mac_pvt;
4713 	gld_vlan_t *vlan;
4714 	boolean_t phys_off;
4715 	boolean_t mult_off;
4716 	int op = GLD_MAC_PROMISC_NOOP;
4717 
4718 	if (state != DL_UNBOUND)
4719 		return (DL_OUTSTATE);
4720 
4721 	ASSERT(macinfo != NULL);
4722 	ASSERT(gld->gld_sap == 0);
4723 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4724 
4725 #ifdef GLD_DEBUG
4726 	if (gld_debug & GLDPROT) {
4727 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4728 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4729 	}
4730 #endif
4731 
4732 	GLDM_LOCK(macinfo, RW_WRITER);
4733 
4734 	if (gld->gld_mcast) {
4735 		for (i = 0; i < gld->gld_multicnt; i++) {
4736 			gld_mcast_t *mcast;
4737 
4738 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4739 				ASSERT(mcast->gldm_refcnt);
4740 				gld_send_disable_multi(macinfo, mcast);
4741 			}
4742 		}
4743 		kmem_free(gld->gld_mcast,
4744 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4745 		gld->gld_mcast = NULL;
4746 		gld->gld_multicnt = 0;
4747 	}
4748 
4749 	/* decide if we need to turn off any promiscuity */
4750 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4751 	    --mac_pvt->nprom == 0);
4752 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4753 	    --mac_pvt->nprom_multi == 0);
4754 
4755 	if (phys_off) {
4756 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4757 		    GLD_MAC_PROMISC_MULTI;
4758 	} else if (mult_off) {
4759 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4760 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4761 	}
4762 
4763 	if (op != GLD_MAC_PROMISC_NOOP)
4764 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4765 
4766 	vlan = (gld_vlan_t *)gld->gld_vlan;
4767 	if (gld->gld_flags & GLD_PROM_PHYS)
4768 		vlan->gldv_nprom--;
4769 	if (gld->gld_flags & GLD_PROM_MULT)
4770 		vlan->gldv_nprom--;
4771 	if (gld->gld_flags & GLD_PROM_SAP) {
4772 		vlan->gldv_nprom--;
4773 		vlan->gldv_nvlan_sap--;
4774 	}
4775 
4776 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4777 
4778 	GLDM_UNLOCK(macinfo);
4779 
4780 	if (phys_off)
4781 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4782 
4783 	/*
4784 	 * We need to hold both locks when modifying the mac stream list
4785 	 * to protect findminor as well as everyone else.
4786 	 */
4787 	mutex_enter(&glddev->gld_devlock);
4788 	GLDM_LOCK(macinfo, RW_WRITER);
4789 
4790 	/* disassociate this stream with its vlan and underlying mac */
4791 	gldremque(gld);
4792 
4793 	if (--vlan->gldv_nstreams == 0) {
4794 		gld_rem_vlan(vlan);
4795 		gld->gld_vlan = NULL;
4796 	}
4797 
4798 	gld->gld_mac_info = NULL;
4799 	gld->gld_state = DL_UNATTACHED;
4800 
4801 	/* cleanup mac layer if last vlan */
4802 	if (mac_pvt->nvlan == 0) {
4803 		gld_stop_mac(macinfo);
4804 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4805 	}
4806 
4807 	/* make sure no references to this gld for gld_v0_sched */
4808 	if (mac_pvt->last_sched == gld)
4809 		mac_pvt->last_sched = NULL;
4810 
4811 	GLDM_UNLOCK(macinfo);
4812 
4813 	/* put the stream on the unattached Style 2 list */
4814 	gldinsque(gld, glddev->gld_str_prev);
4815 
4816 	mutex_exit(&glddev->gld_devlock);
4817 
4818 	/* There will be no mp if we were called from close */
4819 	if (mp) {
4820 		dlokack(q, mp, DL_DETACH_REQ);
4821 	}
4822 	if (gld->gld_style == DL_STYLE2)
4823 		(void) qassociate(q, -1);
4824 	return (GLDE_OK);
4825 }
4826 
4827 /*
4828  * gld_enable_multi (q, mp)
4829  * Enables multicast address on the stream.  If the mac layer
4830  * isn't enabled for this address, enable at that level as well.
4831  */
4832 static int
4833 gld_enable_multi(queue_t *q, mblk_t *mp)
4834 {
4835 	gld_t  *gld = (gld_t *)q->q_ptr;
4836 	glddev_t *glddev;
4837 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4838 	unsigned char *maddr;
4839 	dl_enabmulti_req_t *multi;
4840 	gld_mcast_t *mcast;
4841 	int	i, rc;
4842 	gld_mac_pvt_t *mac_pvt;
4843 
4844 #ifdef GLD_DEBUG
4845 	if (gld_debug & GLDPROT) {
4846 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4847 		    (void *)mp);
4848 	}
4849 #endif
4850 
4851 	if (gld->gld_state == DL_UNATTACHED)
4852 		return (DL_OUTSTATE);
4853 
4854 	ASSERT(macinfo != NULL);
4855 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4856 
4857 	if (macinfo->gldm_set_multicast == NULL) {
4858 		return (DL_UNSUPPORTED);
4859 	}
4860 
4861 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4862 
4863 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4864 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4865 		return (DL_BADADDR);
4866 
4867 	/* request appears to be valid */
4868 
4869 	glddev = mac_pvt->major_dev;
4870 	ASSERT(glddev == gld->gld_device);
4871 
4872 	maddr = mp->b_rptr + multi->dl_addr_offset;
4873 
4874 	/*
4875 	 * The multicast addresses live in a per-device table, along
4876 	 * with a reference count.  Each stream has a table that
4877 	 * points to entries in the device table, with the reference
4878 	 * count reflecting the number of streams pointing at it.  If
4879 	 * this multicast address is already in the per-device table,
4880 	 * all we have to do is point at it.
4881 	 */
4882 	GLDM_LOCK(macinfo, RW_WRITER);
4883 
4884 	/* does this address appear in current table? */
4885 	if (gld->gld_mcast == NULL) {
4886 		/* no mcast addresses -- allocate table */
4887 		gld->gld_mcast = GLD_GETSTRUCT(gld_mcast_t *,
4888 		    glddev->gld_multisize);
4889 		if (gld->gld_mcast == NULL) {
4890 			GLDM_UNLOCK(macinfo);
4891 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4892 			return (GLDE_OK);
4893 		}
4894 		gld->gld_multicnt = glddev->gld_multisize;
4895 	} else {
4896 		for (i = 0; i < gld->gld_multicnt; i++) {
4897 			if (gld->gld_mcast[i] &&
4898 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4899 			    maddr, macinfo->gldm_addrlen)) {
4900 				/* this is a match -- just succeed */
4901 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4902 				GLDM_UNLOCK(macinfo);
4903 				dlokack(q, mp, DL_ENABMULTI_REQ);
4904 				return (GLDE_OK);
4905 			}
4906 		}
4907 	}
4908 
4909 	/*
4910 	 * it wasn't in the stream so check to see if the mac layer has it
4911 	 */
4912 	mcast = NULL;
4913 	if (mac_pvt->mcast_table == NULL) {
4914 		mac_pvt->mcast_table = GLD_GETSTRUCT(gld_mcast_t,
4915 		    glddev->gld_multisize);
4916 		if (mac_pvt->mcast_table == NULL) {
4917 			GLDM_UNLOCK(macinfo);
4918 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4919 			return (GLDE_OK);
4920 		}
4921 	} else {
4922 		for (i = 0; i < glddev->gld_multisize; i++) {
4923 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4924 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4925 			    maddr, macinfo->gldm_addrlen)) {
4926 				mcast = &mac_pvt->mcast_table[i];
4927 				break;
4928 			}
4929 		}
4930 	}
4931 	if (mcast == NULL) {
4932 		/* not in mac layer -- find an empty mac slot to fill in */
4933 		for (i = 0; i < glddev->gld_multisize; i++) {
4934 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4935 				mcast = &mac_pvt->mcast_table[i];
4936 				mac_copy(maddr, mcast->gldm_addr,
4937 				    macinfo->gldm_addrlen);
4938 				break;
4939 			}
4940 		}
4941 	}
4942 	if (mcast == NULL) {
4943 		/* couldn't get a mac layer slot */
4944 		GLDM_UNLOCK(macinfo);
4945 		return (DL_TOOMANY);
4946 	}
4947 
4948 	/* now we have a mac layer slot in mcast -- get a stream slot */
4949 	for (i = 0; i < gld->gld_multicnt; i++) {
4950 		if (gld->gld_mcast[i] != NULL)
4951 			continue;
4952 		/* found an empty slot */
4953 		if (!mcast->gldm_refcnt) {
4954 			/* set mcast in hardware */
4955 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4956 
4957 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4958 			cmac_copy(maddr, cmaddr,
4959 			    macinfo->gldm_addrlen, macinfo);
4960 
4961 			rc = (*macinfo->gldm_set_multicast)
4962 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4963 			if (rc == GLD_NOTSUPPORTED) {
4964 				GLDM_UNLOCK(macinfo);
4965 				return (DL_NOTSUPPORTED);
4966 			} else if (rc == GLD_NORESOURCES) {
4967 				GLDM_UNLOCK(macinfo);
4968 				return (DL_TOOMANY);
4969 			} else if (rc == GLD_BADARG) {
4970 				GLDM_UNLOCK(macinfo);
4971 				return (DL_BADADDR);
4972 			} else if (rc == GLD_RETRY) {
4973 				/*
4974 				 * The putbq and gld_xwait must be
4975 				 * within the lock to prevent races
4976 				 * with gld_sched.
4977 				 */
4978 				(void) putbq(q, mp);
4979 				gld->gld_xwait = B_TRUE;
4980 				GLDM_UNLOCK(macinfo);
4981 				return (GLDE_RETRY);
4982 			} else if (rc != GLD_SUCCESS) {
4983 				GLDM_UNLOCK(macinfo);
4984 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4985 				    DL_SYSERR, EIO);
4986 				return (GLDE_OK);
4987 			}
4988 		}
4989 		gld->gld_mcast[i] = mcast;
4990 		mcast->gldm_refcnt++;
4991 		GLDM_UNLOCK(macinfo);
4992 		dlokack(q, mp, DL_ENABMULTI_REQ);
4993 		return (GLDE_OK);
4994 	}
4995 
4996 	/* couldn't get a stream slot */
4997 	GLDM_UNLOCK(macinfo);
4998 	return (DL_TOOMANY);
4999 }
5000 
5001 
5002 /*
5003  * gld_disable_multi (q, mp)
5004  * Disable the multicast address on the stream.  If last
5005  * reference for the mac layer, disable there as well.
5006  */
5007 static int
5008 gld_disable_multi(queue_t *q, mblk_t *mp)
5009 {
5010 	gld_t  *gld;
5011 	gld_mac_info_t *macinfo;
5012 	unsigned char *maddr;
5013 	dl_disabmulti_req_t *multi;
5014 	int i;
5015 	gld_mcast_t *mcast;
5016 
5017 #ifdef GLD_DEBUG
5018 	if (gld_debug & GLDPROT) {
5019 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
5020 		    (void *)mp);
5021 	}
5022 #endif
5023 
5024 	gld = (gld_t *)q->q_ptr;
5025 	if (gld->gld_state == DL_UNATTACHED)
5026 		return (DL_OUTSTATE);
5027 
5028 	macinfo = gld->gld_mac_info;
5029 	ASSERT(macinfo != NULL);
5030 	if (macinfo->gldm_set_multicast == NULL) {
5031 		return (DL_UNSUPPORTED);
5032 	}
5033 
5034 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
5035 
5036 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
5037 	    multi->dl_addr_length != macinfo->gldm_addrlen)
5038 		return (DL_BADADDR);
5039 
5040 	maddr = mp->b_rptr + multi->dl_addr_offset;
5041 
5042 	/* request appears to be valid */
5043 	/* does this address appear in current table? */
5044 	GLDM_LOCK(macinfo, RW_WRITER);
5045 	if (gld->gld_mcast != NULL) {
5046 		for (i = 0; i < gld->gld_multicnt; i++)
5047 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
5048 			    mac_eq(mcast->gldm_addr,
5049 			    maddr, macinfo->gldm_addrlen)) {
5050 				ASSERT(mcast->gldm_refcnt);
5051 				gld_send_disable_multi(macinfo, mcast);
5052 				gld->gld_mcast[i] = NULL;
5053 				GLDM_UNLOCK(macinfo);
5054 				dlokack(q, mp, DL_DISABMULTI_REQ);
5055 				return (GLDE_OK);
5056 			}
5057 	}
5058 	GLDM_UNLOCK(macinfo);
5059 	return (DL_NOTENAB); /* not an enabled address */
5060 }
5061 
5062 /*
5063  * gld_send_disable_multi(macinfo, mcast)
5064  * this function is used to disable a multicast address if the reference
5065  * count goes to zero. The disable request will then be forwarded to the
5066  * lower stream.
5067  */
5068 static void
5069 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
5070 {
5071 	ASSERT(macinfo != NULL);
5072 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5073 	ASSERT(mcast != NULL);
5074 	ASSERT(mcast->gldm_refcnt);
5075 
5076 	if (!mcast->gldm_refcnt) {
5077 		return;			/* "cannot happen" */
5078 	}
5079 
5080 	if (--mcast->gldm_refcnt > 0) {
5081 		return;
5082 	}
5083 
5084 	/*
5085 	 * This must be converted from canonical form to device form.
5086 	 * The refcnt is now zero so we can trash the data.
5087 	 */
5088 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
5089 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
5090 
5091 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
5092 	(void) (*macinfo->gldm_set_multicast)
5093 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
5094 }
5095 
5096 /*
5097  * gld_promisc (q, mp, req, on)
5098  *	enable or disable the use of promiscuous mode with the hardware
5099  */
5100 static int
5101 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
5102 {
5103 	gld_t *gld;
5104 	gld_mac_info_t *macinfo;
5105 	gld_mac_pvt_t *mac_pvt;
5106 	gld_vlan_t *vlan;
5107 	union DL_primitives *prim;
5108 	int macrc = GLD_SUCCESS;
5109 	int dlerr = GLDE_OK;
5110 	int op = GLD_MAC_PROMISC_NOOP;
5111 
5112 #ifdef GLD_DEBUG
5113 	if (gld_debug & GLDTRACE)
5114 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
5115 		    (void *)q, (void *)mp, req, on);
5116 #endif
5117 
5118 	ASSERT(mp != NULL);
5119 	prim = (union DL_primitives *)mp->b_rptr;
5120 
5121 	/* XXX I think spec allows promisc in unattached state */
5122 	gld = (gld_t *)q->q_ptr;
5123 	if (gld->gld_state == DL_UNATTACHED)
5124 		return (DL_OUTSTATE);
5125 
5126 	macinfo = gld->gld_mac_info;
5127 	ASSERT(macinfo != NULL);
5128 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5129 
5130 	vlan = (gld_vlan_t *)gld->gld_vlan;
5131 	ASSERT(vlan != NULL);
5132 
5133 	GLDM_LOCK(macinfo, RW_WRITER);
5134 
5135 	/*
5136 	 * Work out what request (if any) has to be made to the MAC layer
5137 	 */
5138 	if (on) {
5139 		switch (prim->promiscon_req.dl_level) {
5140 		default:
5141 			dlerr = DL_UNSUPPORTED;	/* this is an error */
5142 			break;
5143 
5144 		case DL_PROMISC_PHYS:
5145 			if (mac_pvt->nprom == 0)
5146 				op = GLD_MAC_PROMISC_PHYS;
5147 			break;
5148 
5149 		case DL_PROMISC_MULTI:
5150 			if (mac_pvt->nprom_multi == 0)
5151 				if (mac_pvt->nprom == 0)
5152 					op = GLD_MAC_PROMISC_MULTI;
5153 			break;
5154 
5155 		case DL_PROMISC_SAP:
5156 			/* We can do this without reference to the MAC */
5157 			break;
5158 		}
5159 	} else {
5160 		switch (prim->promiscoff_req.dl_level) {
5161 		default:
5162 			dlerr = DL_UNSUPPORTED;	/* this is an error */
5163 			break;
5164 
5165 		case DL_PROMISC_PHYS:
5166 			if (!(gld->gld_flags & GLD_PROM_PHYS))
5167 				dlerr = DL_NOTENAB;
5168 			else if (mac_pvt->nprom == 1)
5169 				if (mac_pvt->nprom_multi)
5170 					op = GLD_MAC_PROMISC_MULTI;
5171 				else
5172 					op = GLD_MAC_PROMISC_NONE;
5173 			break;
5174 
5175 		case DL_PROMISC_MULTI:
5176 			if (!(gld->gld_flags & GLD_PROM_MULT))
5177 				dlerr = DL_NOTENAB;
5178 			else if (mac_pvt->nprom_multi == 1)
5179 				if (mac_pvt->nprom == 0)
5180 					op = GLD_MAC_PROMISC_NONE;
5181 			break;
5182 
5183 		case DL_PROMISC_SAP:
5184 			if (!(gld->gld_flags & GLD_PROM_SAP))
5185 				dlerr = DL_NOTENAB;
5186 
5187 			/* We can do this without reference to the MAC */
5188 			break;
5189 		}
5190 	}
5191 
5192 	/*
5193 	 * The request was invalid in some way so no need to continue.
5194 	 */
5195 	if (dlerr != GLDE_OK) {
5196 		GLDM_UNLOCK(macinfo);
5197 		return (dlerr);
5198 	}
5199 
5200 	/*
5201 	 * Issue the request to the MAC layer, if required
5202 	 */
5203 	if (op != GLD_MAC_PROMISC_NOOP) {
5204 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
5205 	}
5206 
5207 	/*
5208 	 * On success, update the appropriate flags & refcounts
5209 	 */
5210 	if (macrc == GLD_SUCCESS) {
5211 		if (on) {
5212 			switch (prim->promiscon_req.dl_level) {
5213 			case DL_PROMISC_PHYS:
5214 				mac_pvt->nprom++;
5215 				vlan->gldv_nprom++;
5216 				gld->gld_flags |= GLD_PROM_PHYS;
5217 				break;
5218 
5219 			case DL_PROMISC_MULTI:
5220 				mac_pvt->nprom_multi++;
5221 				vlan->gldv_nprom++;
5222 				gld->gld_flags |= GLD_PROM_MULT;
5223 				break;
5224 
5225 			case DL_PROMISC_SAP:
5226 				gld->gld_flags |= GLD_PROM_SAP;
5227 				vlan->gldv_nprom++;
5228 				vlan->gldv_nvlan_sap++;
5229 				break;
5230 
5231 			default:
5232 				break;
5233 			}
5234 		} else {
5235 			switch (prim->promiscoff_req.dl_level) {
5236 			case DL_PROMISC_PHYS:
5237 				mac_pvt->nprom--;
5238 				vlan->gldv_nprom--;
5239 				gld->gld_flags &= ~GLD_PROM_PHYS;
5240 				break;
5241 
5242 			case DL_PROMISC_MULTI:
5243 				mac_pvt->nprom_multi--;
5244 				vlan->gldv_nprom--;
5245 				gld->gld_flags &= ~GLD_PROM_MULT;
5246 				break;
5247 
5248 			case DL_PROMISC_SAP:
5249 				gld->gld_flags &= ~GLD_PROM_SAP;
5250 				vlan->gldv_nvlan_sap--;
5251 				vlan->gldv_nprom--;
5252 				break;
5253 
5254 			default:
5255 				break;
5256 			}
5257 		}
5258 	} else if (macrc == GLD_RETRY) {
5259 		/*
5260 		 * The putbq and gld_xwait must be within the lock to
5261 		 * prevent races with gld_sched.
5262 		 */
5263 		(void) putbq(q, mp);
5264 		gld->gld_xwait = B_TRUE;
5265 	}
5266 
5267 	GLDM_UNLOCK(macinfo);
5268 
5269 	/*
5270 	 * Finally, decide how to reply.
5271 	 *
5272 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
5273 	 * layer but failed.  In such cases, we can return a DL_* error
5274 	 * code and let the caller send an error-ack reply upstream, or
5275 	 * we can send a reply here and then return GLDE_OK so that the
5276 	 * caller doesn't also respond.
5277 	 *
5278 	 * If physical-promiscuous mode was (successfully) switched on or
5279 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
5280 	 */
5281 	switch (macrc) {
5282 	case GLD_NOTSUPPORTED:
5283 		return (DL_NOTSUPPORTED);
5284 
5285 	case GLD_NORESOURCES:
5286 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
5287 		return (GLDE_OK);
5288 
5289 	case GLD_RETRY:
5290 		return (GLDE_RETRY);
5291 
5292 	default:
5293 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
5294 		return (GLDE_OK);
5295 
5296 	case GLD_SUCCESS:
5297 		dlokack(q, mp, req);
5298 		break;
5299 	}
5300 
5301 	switch (op) {
5302 	case GLD_MAC_PROMISC_NOOP:
5303 		break;
5304 
5305 	case GLD_MAC_PROMISC_PHYS:
5306 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
5307 		break;
5308 
5309 	default:
5310 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
5311 		break;
5312 	}
5313 
5314 	return (GLDE_OK);
5315 }
5316 
5317 /*
5318  * gld_physaddr()
5319  *	get the current or factory physical address value
5320  */
5321 static int
5322 gld_physaddr(queue_t *q, mblk_t *mp)
5323 {
5324 	gld_t *gld = (gld_t *)q->q_ptr;
5325 	gld_mac_info_t *macinfo;
5326 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5327 	unsigned char addr[GLD_MAX_ADDRLEN];
5328 
5329 	if (gld->gld_state == DL_UNATTACHED)
5330 		return (DL_OUTSTATE);
5331 
5332 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5333 	ASSERT(macinfo != NULL);
5334 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5335 
5336 	switch (prim->physaddr_req.dl_addr_type) {
5337 	case DL_FACT_PHYS_ADDR:
5338 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5339 		    (caddr_t)addr, macinfo->gldm_addrlen);
5340 		break;
5341 	case DL_CURR_PHYS_ADDR:
5342 		/* make a copy so we don't hold the lock across qreply */
5343 		GLDM_LOCK(macinfo, RW_WRITER);
5344 		mac_copy((caddr_t)
5345 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5346 		    (caddr_t)addr, macinfo->gldm_addrlen);
5347 		GLDM_UNLOCK(macinfo);
5348 		break;
5349 	default:
5350 		return (DL_BADPRIM);
5351 	}
5352 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5353 	return (GLDE_OK);
5354 }
5355 
5356 /*
5357  * gld_setaddr()
5358  *	change the hardware's physical address to a user specified value
5359  */
5360 static int
5361 gld_setaddr(queue_t *q, mblk_t *mp)
5362 {
5363 	gld_t *gld = (gld_t *)q->q_ptr;
5364 	gld_mac_info_t *macinfo;
5365 	gld_mac_pvt_t *mac_pvt;
5366 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5367 	unsigned char *addr;
5368 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5369 	int rc;
5370 	gld_vlan_t *vlan;
5371 
5372 	if (gld->gld_state == DL_UNATTACHED)
5373 		return (DL_OUTSTATE);
5374 
5375 	vlan = (gld_vlan_t *)gld->gld_vlan;
5376 	ASSERT(vlan != NULL);
5377 
5378 	if (vlan->gldv_id != VLAN_VID_NONE)
5379 		return (DL_NOTSUPPORTED);
5380 
5381 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5382 	ASSERT(macinfo != NULL);
5383 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5384 
5385 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5386 	    prim->set_physaddr_req.dl_addr_length) ||
5387 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5388 		return (DL_BADADDR);
5389 
5390 	GLDM_LOCK(macinfo, RW_WRITER);
5391 
5392 	/* now do the set at the hardware level */
5393 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5394 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5395 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5396 
5397 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5398 	if (rc == GLD_SUCCESS)
5399 		mac_copy(addr, mac_pvt->curr_macaddr,
5400 		    macinfo->gldm_addrlen);
5401 
5402 	GLDM_UNLOCK(macinfo);
5403 
5404 	switch (rc) {
5405 	case GLD_SUCCESS:
5406 		break;
5407 	case GLD_NOTSUPPORTED:
5408 		return (DL_NOTSUPPORTED);
5409 	case GLD_BADARG:
5410 		return (DL_BADADDR);
5411 	case GLD_NORESOURCES:
5412 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5413 		return (GLDE_OK);
5414 	default:
5415 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5416 		return (GLDE_OK);
5417 	}
5418 
5419 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5420 
5421 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5422 	return (GLDE_OK);
5423 }
5424 
5425 int
5426 gld_get_statistics(queue_t *q, mblk_t *mp)
5427 {
5428 	dl_get_statistics_ack_t *dlsp;
5429 	gld_t  *gld = (gld_t *)q->q_ptr;
5430 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5431 	gld_mac_pvt_t *mac_pvt;
5432 
5433 	if (gld->gld_state == DL_UNATTACHED)
5434 		return (DL_OUTSTATE);
5435 
5436 	ASSERT(macinfo != NULL);
5437 
5438 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5439 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5440 
5441 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5442 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5443 
5444 	if (mp == NULL)
5445 		return (GLDE_OK);	/* mexchange already sent merror */
5446 
5447 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5448 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5449 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5450 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5451 
5452 	GLDM_LOCK(macinfo, RW_WRITER);
5453 	bcopy(mac_pvt->kstatp->ks_data,
5454 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5455 	    sizeof (struct gldkstats));
5456 	GLDM_UNLOCK(macinfo);
5457 
5458 	qreply(q, mp);
5459 	return (GLDE_OK);
5460 }
5461 
5462 /* =================================================== */
5463 /* misc utilities, some requiring various mutexes held */
5464 /* =================================================== */
5465 
5466 /*
5467  * Initialize and start the driver.
5468  */
5469 static int
5470 gld_start_mac(gld_mac_info_t *macinfo)
5471 {
5472 	int	rc;
5473 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5474 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5475 
5476 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5477 	ASSERT(!mac_pvt->started);
5478 
5479 	rc = (*macinfo->gldm_reset)(macinfo);
5480 	if (rc != GLD_SUCCESS)
5481 		return (GLD_FAILURE);
5482 
5483 	/* set the addr after we reset the device */
5484 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5485 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5486 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5487 
5488 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5489 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5490 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5491 		return (GLD_FAILURE);
5492 
5493 	rc = (*macinfo->gldm_start)(macinfo);
5494 	if (rc != GLD_SUCCESS)
5495 		return (GLD_FAILURE);
5496 
5497 	mac_pvt->started = B_TRUE;
5498 	return (GLD_SUCCESS);
5499 }
5500 
5501 /*
5502  * Stop the driver.
5503  */
5504 static void
5505 gld_stop_mac(gld_mac_info_t *macinfo)
5506 {
5507 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5508 
5509 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5510 	ASSERT(mac_pvt->started);
5511 
5512 	(void) (*macinfo->gldm_stop)(macinfo);
5513 
5514 	mac_pvt->started = B_FALSE;
5515 }
5516 
5517 
5518 /*
5519  * gld_set_ipq will set a pointer to the queue which is bound to the
5520  * IP sap if:
5521  * o the device type is ethernet or IPoIB.
5522  * o there is no stream in SAP promiscuous mode.
5523  * o there is exactly one stream bound to the IP sap.
5524  * o the stream is in "fastpath" mode.
5525  */
5526 static void
5527 gld_set_ipq(gld_t *gld)
5528 {
5529 	gld_vlan_t	*vlan;
5530 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5531 	gld_t		*ip_gld = NULL;
5532 	uint_t		ipq_candidates = 0;
5533 	gld_t		*ipv6_gld = NULL;
5534 	uint_t		ipv6q_candidates = 0;
5535 
5536 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5537 
5538 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5539 	if (((macinfo->gldm_type != DL_ETHER) &&
5540 	    (macinfo->gldm_type != DL_IB)) ||
5541 	    (gld_global_options & GLD_OPT_NO_IPQ))
5542 		return;
5543 
5544 	vlan = (gld_vlan_t *)gld->gld_vlan;
5545 	ASSERT(vlan != NULL);
5546 
5547 	/* clear down any previously defined ipqs */
5548 	vlan->gldv_ipq = NULL;
5549 	vlan->gldv_ipv6q = NULL;
5550 
5551 	/* Try to find a single stream eligible to receive IP packets */
5552 	for (gld = vlan->gldv_str_next;
5553 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5554 		if (gld->gld_state != DL_IDLE)
5555 			continue;	/* not eligible to receive */
5556 		if (gld->gld_flags & GLD_STR_CLOSING)
5557 			continue;	/* not eligible to receive */
5558 
5559 		if (gld->gld_sap == ETHERTYPE_IP) {
5560 			ip_gld = gld;
5561 			ipq_candidates++;
5562 		}
5563 
5564 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5565 			ipv6_gld = gld;
5566 			ipv6q_candidates++;
5567 		}
5568 	}
5569 
5570 	if (ipq_candidates == 1) {
5571 		ASSERT(ip_gld != NULL);
5572 
5573 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5574 			vlan->gldv_ipq = ip_gld->gld_qptr;
5575 	}
5576 
5577 	if (ipv6q_candidates == 1) {
5578 		ASSERT(ipv6_gld != NULL);
5579 
5580 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5581 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5582 	}
5583 }
5584 
5585 /*
5586  * gld_flushqueue (q)
5587  *	used by DLPI primitives that require flushing the queues.
5588  *	essentially, this is DL_UNBIND_REQ.
5589  */
5590 static void
5591 gld_flushqueue(queue_t *q)
5592 {
5593 	/* flush all data in both queues */
5594 	/* XXX Should these be FLUSHALL? */
5595 	flushq(q, FLUSHDATA);
5596 	flushq(WR(q), FLUSHDATA);
5597 	/* flush all the queues upstream */
5598 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5599 }
5600 
5601 /*
5602  * gld_devlookup (major)
5603  * search the device table for the device with specified
5604  * major number and return a pointer to it if it exists
5605  */
5606 static glddev_t *
5607 gld_devlookup(int major)
5608 {
5609 	struct glddevice *dev;
5610 
5611 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5612 
5613 	for (dev = gld_device_list.gld_next;
5614 	    dev != &gld_device_list;
5615 	    dev = dev->gld_next) {
5616 		ASSERT(dev);
5617 		if (dev->gld_major == major)
5618 			return (dev);
5619 	}
5620 	return (NULL);
5621 }
5622 
5623 /*
5624  * gld_findminor(device)
5625  * Returns a minor number currently unused by any stream in the current
5626  * device class (major) list.
5627  */
5628 static int
5629 gld_findminor(glddev_t *device)
5630 {
5631 	gld_t		*next;
5632 	gld_mac_info_t	*nextmac;
5633 	gld_vlan_t	*nextvlan;
5634 	int		minor;
5635 	int		i;
5636 
5637 	ASSERT(mutex_owned(&device->gld_devlock));
5638 
5639 	/* The fast way */
5640 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5641 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5642 		return (device->gld_nextminor++);
5643 
5644 	/* The steady way */
5645 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5646 	    minor++) {
5647 		/* Search all unattached streams */
5648 		for (next = device->gld_str_next;
5649 		    next != (gld_t *)&device->gld_str_next;
5650 		    next = next->gld_next) {
5651 			if (minor == next->gld_minor)
5652 				goto nextminor;
5653 		}
5654 		/* Search all attached streams; we don't need maclock because */
5655 		/* mac stream list is protected by devlock as well as maclock */
5656 		for (nextmac = device->gld_mac_next;
5657 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5658 		    nextmac = nextmac->gldm_next) {
5659 			gld_mac_pvt_t *pvt =
5660 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5661 
5662 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5663 				continue;	/* this one's not ready yet */
5664 
5665 			for (i = 0; i < VLAN_HASHSZ; i++) {
5666 				for (nextvlan = pvt->vlan_hash[i];
5667 				    nextvlan != NULL;
5668 				    nextvlan = nextvlan->gldv_next) {
5669 					for (next = nextvlan->gldv_str_next;
5670 					    next !=
5671 					    (gld_t *)&nextvlan->gldv_str_next;
5672 					    next = next->gld_next) {
5673 						if (minor == next->gld_minor)
5674 							goto nextminor;
5675 					}
5676 				}
5677 			}
5678 		}
5679 
5680 		return (minor);
5681 nextminor:
5682 		/* don't need to do anything */
5683 		;
5684 	}
5685 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5686 	    device->gld_name);
5687 	return (0);
5688 }
5689 
5690 /*
5691  * version of insque/remque for use by this driver
5692  */
5693 struct qelem {
5694 	struct qelem *q_forw;
5695 	struct qelem *q_back;
5696 	/* rest of structure */
5697 };
5698 
5699 static void
5700 gldinsque(void *elem, void *pred)
5701 {
5702 	struct qelem *pelem = elem;
5703 	struct qelem *ppred = pred;
5704 	struct qelem *pnext = ppred->q_forw;
5705 
5706 	pelem->q_forw = pnext;
5707 	pelem->q_back = ppred;
5708 	ppred->q_forw = pelem;
5709 	pnext->q_back = pelem;
5710 }
5711 
5712 static void
5713 gldremque(void *arg)
5714 {
5715 	struct qelem *pelem = arg;
5716 	struct qelem *elem = arg;
5717 
5718 	pelem->q_forw->q_back = pelem->q_back;
5719 	pelem->q_back->q_forw = pelem->q_forw;
5720 	elem->q_back = elem->q_forw = NULL;
5721 }
5722 
5723 static gld_vlan_t *
5724 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5725 {
5726 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5727 	gld_vlan_t	**pp;
5728 	gld_vlan_t	*p;
5729 
5730 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5731 	while ((p = *pp) != NULL) {
5732 		ASSERT(p->gldv_id != vid);
5733 		pp = &(p->gldv_next);
5734 	}
5735 
5736 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5737 		return (NULL);
5738 
5739 	p->gldv_mac = macinfo;
5740 	p->gldv_id = vid;
5741 
5742 	if (vid == VLAN_VID_NONE) {
5743 		p->gldv_ptag = VLAN_VTAG_NONE;
5744 		p->gldv_stats = mac_pvt->statistics;
5745 		p->gldv_kstatp = NULL;
5746 	} else {
5747 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5748 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5749 		    KM_SLEEP);
5750 
5751 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5752 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5753 			kmem_free(p, sizeof (gld_vlan_t));
5754 			return (NULL);
5755 		}
5756 	}
5757 
5758 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5759 	mac_pvt->nvlan++;
5760 	*pp = p;
5761 
5762 	return (p);
5763 }
5764 
5765 static void
5766 gld_rem_vlan(gld_vlan_t *vlan)
5767 {
5768 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5769 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5770 	gld_vlan_t	**pp;
5771 	gld_vlan_t	*p;
5772 
5773 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5774 	while ((p = *pp) != NULL) {
5775 		if (p->gldv_id == vlan->gldv_id)
5776 			break;
5777 		pp = &(p->gldv_next);
5778 	}
5779 	ASSERT(p != NULL);
5780 
5781 	*pp = p->gldv_next;
5782 	mac_pvt->nvlan--;
5783 	if (p->gldv_id != VLAN_VID_NONE) {
5784 		ASSERT(p->gldv_kstatp != NULL);
5785 		kstat_delete(p->gldv_kstatp);
5786 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5787 	}
5788 	kmem_free(p, sizeof (gld_vlan_t));
5789 }
5790 
5791 gld_vlan_t *
5792 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5793 {
5794 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5795 	gld_vlan_t	*p;
5796 
5797 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5798 	while (p != NULL) {
5799 		if (p->gldv_id == vid)
5800 			return (p);
5801 		p = p->gldv_next;
5802 	}
5803 	return (NULL);
5804 }
5805 
5806 gld_vlan_t *
5807 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5808 {
5809 	gld_vlan_t	*vlan;
5810 
5811 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5812 		vlan = gld_add_vlan(macinfo, vid);
5813 
5814 	return (vlan);
5815 }
5816 
5817 /*
5818  * gld_bitrevcopy()
5819  * This is essentially bcopy, with the ability to bit reverse the
5820  * the source bytes. The MAC addresses bytes as transmitted by FDDI
5821  * interfaces are bit reversed.
5822  */
5823 void
5824 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5825 {
5826 	while (n--)
5827 		*target++ = bit_rev[(uchar_t)*src++];
5828 }
5829 
5830 /*
5831  * gld_bitreverse()
5832  * Convert the bit order by swaping all the bits, using a
5833  * lookup table.
5834  */
5835 void
5836 gld_bitreverse(uchar_t *rptr, size_t n)
5837 {
5838 	while (n--) {
5839 		*rptr = bit_rev[*rptr];
5840 		rptr++;
5841 	}
5842 }
5843 
5844 char *
5845 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5846 {
5847 	int i;
5848 	char *cp = etherbuf;
5849 	static char digits[] = "0123456789abcdef";
5850 
5851 	for (i = 0; i < len; i++) {
5852 		*cp++ = digits[*ap >> 4];
5853 		*cp++ = digits[*ap++ & 0xf];
5854 		*cp++ = ':';
5855 	}
5856 	*--cp = 0;
5857 	return (etherbuf);
5858 }
5859 
5860 #ifdef GLD_DEBUG
5861 static void
5862 gld_check_assertions()
5863 {
5864 	glddev_t	*dev;
5865 	gld_mac_info_t	*mac;
5866 	gld_t		*str;
5867 	gld_vlan_t	*vlan;
5868 	int		i;
5869 
5870 	mutex_enter(&gld_device_list.gld_devlock);
5871 
5872 	for (dev = gld_device_list.gld_next;
5873 	    dev != (glddev_t *)&gld_device_list.gld_next;
5874 	    dev = dev->gld_next) {
5875 		mutex_enter(&dev->gld_devlock);
5876 		ASSERT(dev->gld_broadcast != NULL);
5877 		for (str = dev->gld_str_next;
5878 		    str != (gld_t *)&dev->gld_str_next;
5879 		    str = str->gld_next) {
5880 			ASSERT(str->gld_device == dev);
5881 			ASSERT(str->gld_mac_info == NULL);
5882 			ASSERT(str->gld_qptr != NULL);
5883 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5884 			ASSERT(str->gld_multicnt == 0);
5885 			ASSERT(str->gld_mcast == NULL);
5886 			ASSERT(!(str->gld_flags &
5887 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5888 			ASSERT(str->gld_sap == 0);
5889 			ASSERT(str->gld_state == DL_UNATTACHED);
5890 		}
5891 		for (mac = dev->gld_mac_next;
5892 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5893 		    mac = mac->gldm_next) {
5894 			int nvlan = 0;
5895 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5896 
5897 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5898 				continue;	/* this one's not ready yet */
5899 
5900 			GLDM_LOCK(mac, RW_WRITER);
5901 			ASSERT(mac->gldm_devinfo != NULL);
5902 			ASSERT(mac->gldm_mac_pvt != NULL);
5903 			ASSERT(pvt->interfacep != NULL);
5904 			ASSERT(pvt->kstatp != NULL);
5905 			ASSERT(pvt->statistics != NULL);
5906 			ASSERT(pvt->major_dev == dev);
5907 
5908 			for (i = 0; i < VLAN_HASHSZ; i++) {
5909 				for (vlan = pvt->vlan_hash[i];
5910 				    vlan != NULL; vlan = vlan->gldv_next) {
5911 					int nstr = 0;
5912 
5913 					ASSERT(vlan->gldv_mac == mac);
5914 
5915 					for (str = vlan->gldv_str_next;
5916 					    str !=
5917 					    (gld_t *)&vlan->gldv_str_next;
5918 					    str = str->gld_next) {
5919 						ASSERT(str->gld_device == dev);
5920 						ASSERT(str->gld_mac_info ==
5921 						    mac);
5922 						ASSERT(str->gld_qptr != NULL);
5923 						ASSERT(str->gld_minor >=
5924 						    GLD_MIN_CLONE_MINOR);
5925 						ASSERT(
5926 						    str->gld_multicnt == 0 ||
5927 						    str->gld_mcast);
5928 						nstr++;
5929 					}
5930 					ASSERT(vlan->gldv_nstreams == nstr);
5931 					nvlan++;
5932 				}
5933 			}
5934 			ASSERT(pvt->nvlan == nvlan);
5935 			GLDM_UNLOCK(mac);
5936 		}
5937 		mutex_exit(&dev->gld_devlock);
5938 	}
5939 	mutex_exit(&gld_device_list.gld_devlock);
5940 }
5941 #endif
5942