1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * IDN DLPI support (based on QE implementation).
31  */
32 #include <sys/types.h>
33 #include <sys/debug.h>
34 #include <sys/stropts.h>
35 #include <sys/stream.h>
36 #include <sys/systm.h>
37 #include <sys/cmn_err.h>
38 #include <sys/errno.h>
39 #ifdef xxx_trace
40 #include <sys/vtrace.h>
41 #endif /* xxx_trace */
42 #include <sys/kmem.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/strsun.h>
46 #include <sys/stat.h>
47 #include <sys/kstat.h>
48 #include <sys/dlpi.h>
49 #include <sys/time.h>
50 #include <sys/cpuvar.h>
51 
52 #include <sys/idn.h>
53 
54 #ifdef	IPV6
55 #define	IS_ETHERTYPE_IPV4(x)	((x) == ETHERTYPE_IP)
56 #define	IS_ETHERTYPE_IPV6(x)	((x) == ETHERTYPE_IPV6)
57 #define	IS_ETHERTYPE_IP(x)	(IS_ETHERTYPE_IPV4(x) || IS_ETHERTYPE_IPV6(x))
58 #else
59 #define	IS_ETHERTYPE_IPV4(x)	((x) == ETHERTYPE_IP)
60 #define	IS_ETHERTYPE_IPV6(x)	(0)
61 #define	IS_ETHERTYPE_IP		IS_ETHERTYPE_IPV4
62 #endif /* IPV6 */
63 
64 #ifdef IDN_TRACE
65 /*
66  * This stuff should go into <sys/vtrace.h>
67  */
68 #define	TR_FAC_IDN		100
69 /*
70  * TR_FAC_IDN tags
71  */
72 #define	TR_IDN_OPEN		0
73 #define	TR_IDN_CLOSE		1
74 #define	TR_IDN_WPUT_START	2
75 #define	TR_IDN_WPUT_END		3
76 #define	TR_IDN_WSRV_START	4
77 #define	TR_IDN_WSRV_END		5
78 #define	TR_IDN_START_START	6
79 #define	TR_IDN_START_END	7
80 #define	TR_IDN_INTR_START	8
81 #define	TR_IDN_INTR_END		9
82 #define	TR_IDN_READ_START	10
83 #define	TR_IDN_READ_END		11
84 #define	TR_IDN_SENDUP_START	12
85 #define	TR_IDN_SENDUP_END	13
86 #define	TR_IDN_ADDUDIND_START	14
87 #define	TR_IDN_ADDUDIND_END	15
88 #define	TR_IDN_GETBUF_START	16
89 #define	TR_IDN_GETBUF_END	17
90 #define	TR_IDN_FREEBUF_START	18
91 #define	TR_IDN_FREEBUF_END	19
92 #define	TR_IDN_PROTO_START	20
93 #define	TR_IDN_PROTO_END	21
94 #define	TR_IDN_INIT_START	22
95 #define	TR_IDN_INIT_END		23
96 #define	TR_IDN_PROTO_IN		24
97 #define	TR_IDN_PROTO_OUT	25
98 
99 #define	IDNTRACE(fac, tag)	(printf("idn.TRACE: "))
100 
101 #define	TRACE_0(fac, tag, name) \
102 	IDNTRACE((fac), (tag)); \
103 	printf(name); printf("\n");
104 
105 #define	TRACE_1(fac, tag, name, d1) \
106 	IDNTRACE((fac), (tag)); \
107 	printf(name, (d1)); printf("\n");
108 
109 #define	TRACE_2(fac, tag, name, d1, d2) \
110 	IDNTRACE((fac), (tag)); \
111 	printf(name, (d1), (d2)); printf("\n");
112 
113 #define	TRACE_3(fac, tag, name, d1, d2, d3) \
114 	IDNTRACE((fac), (tag)); \
115 	printf(name, (d1), (d2), (d3)); printf("\n");
116 
117 #define	TRACE_4(fac, tag, name, d1, d2, d3, d4) \
118 	IDNTRACE((fac), (tag)); \
119 	printf(name, (d1), (d2), (d3), (d4)); printf("\n");
120 
121 #define	TRACE_5(fac, tag, name, d1, d2, d3, d4, d5) \
122 	IDNTRACE((fac), (tag)); \
123 	printf(name, (d1), (d2), (d3), (d4), (d5)); printf("\n");
124 
125 #else /* IDN_TRACE */
126 
127 #define	TRACE_0(fac, tag, name) 			{}
128 #define	TRACE_1(fac, tag, name, d1) 			{}
129 #define	TRACE_2(fac, tag, name, d1, d2) 		{}
130 #define	TRACE_3(fac, tag, name, d1, d2, d3) 		{}
131 #define	TRACE_4(fac, tag, name, d1, d2, d3, d4) 	{}
132 #define	TRACE_5(fac, tag, name, d1, d2, d3, d4, d5) 	{}
133 
134 #endif /* IDN_TRACE */
135 
136 #ifdef DEBUG
137 #define	DLERRORACK(qq, mm, cc, ee, xx) \
138 { \
139 	PR_DLPI("dlpi: ERRORACK: 0x%x(%s), err = 0x%x(%s)\n", \
140 		(uint_t)(cc), dlprim2str(cc), \
141 		(uint_t)(ee), dlerr2str((int)(ee))); \
142 	dlerrorack((qq), (mm), (cc), (ee), (xx)); \
143 }
144 #define	DLOKACK(qq, mm, cc) \
145 { \
146 	PR_DLPI("dlpi: OKACK: 0x%x(%s)\n", (cc), dlprim2str(cc)); \
147 	dlokack((qq), (mm), (cc)); \
148 }
149 #define	DLBINDACK(qq, mm, ss, aa, ll, xx, yy) \
150 { \
151 	PR_DLPI("dlpi: BINDACK: eth=%x:%x:%x:%x:%x:%x, sap=0x%x, l=%d\n", \
152 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[0], \
153 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[1], \
154 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[2], \
155 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[3], \
156 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[4], \
157 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[5], \
158 		(uint_t)(ss), (int)(ll)); \
159 	dlbindack((qq), (mm), (ss), (aa), (ll), (xx), (yy)); \
160 }
161 #define	DLPHYSADDRACK(qq, mm, aa, ll) \
162 { \
163 	PR_DLPI("dlpi: PHYSACK: eth=%x:%x:%x:%x:%x:%x, l=%d\n", \
164 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[0], \
165 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[1], \
166 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[2], \
167 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[3], \
168 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[4], \
169 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[5], \
170 		(ll)); \
171 	dlphysaddrack((qq), (mm), (aa), (ll)); \
172 }
173 
174 static char *dlerrstr[] = {
175 	"DL_BADSAP",
176 	"DL_BADADDR",
177 	"DL_ACCESS",
178 	"DL_OUTSTATE",
179 	"DL_SYSERR",
180 	"DL_BADCORR",
181 	"DL_BADDATA",
182 	"DL_UNSUPPORTED",
183 	"DL_BADPPA",
184 	"DL_BADPRIM",
185 	"DL_BADQOSPARAM",
186 	"DL_BADQOSTYPE",
187 	"DL_BADTOKEN",
188 	"DL_BOUND",
189 	"DL_INITFAILED",
190 	"DL_NOADDR",
191 	"DL_NOTINIT",
192 	"DL_UNDELIVERABLE",
193 	"DL_NOTSUPPORTED",
194 	"DL_TOOMANY",
195 	"DL_NOTENAB",
196 	"DL_BUSY",
197 	"DL_NOAUTO",
198 	"DL_NOXIDAUTO",
199 	"DL_NOTESTAUTO",
200 	"DL_XIDAUTO",
201 	"DL_TESTAUTO",
202 	"DL_PENDING"
203 };
204 static int dlerrnum = (sizeof (dlerrstr) / sizeof (char *));
205 
206 static char *
207 dlerr2str(int err)
208 {
209 	if ((err < 0) || (err >= dlerrnum))
210 		return ("unknown");
211 	else
212 		return (dlerrstr[err]);
213 }
214 
215 static char *
216 dlprim2str(int prim)
217 {
218 	char	*pstr;
219 
220 	switch (prim) {
221 	case DL_UNITDATA_REQ:	pstr = "UNITDATA_REQ";		break;
222 	case DL_ATTACH_REQ:	pstr = "ATTACH_REQ";		break;
223 	case DL_DETACH_REQ:	pstr = "DETACH_REQ";		break;
224 	case DL_BIND_REQ:	pstr = "BIND_REQ";		break;
225 	case DL_UNBIND_REQ:	pstr = "UNBIND_REQ";		break;
226 	case DL_INFO_REQ:	pstr = "INFO_REQ";		break;
227 	case DL_PROMISCON_REQ:	pstr = "PROMISCON_REQ";		break;
228 	case DL_PROMISCOFF_REQ:	pstr = "PROMISCOFF_REQ";	break;
229 	case DL_ENABMULTI_REQ:	pstr = "ENABMULTI_REQ";		break;
230 	case DL_DISABMULTI_REQ:	pstr = "DISABMULTI_REQ";	break;
231 	case DL_PHYS_ADDR_REQ:	pstr = "PHYS_ADDR_REQ";		break;
232 	case DL_SET_PHYS_ADDR_REQ:
233 				pstr = "SET_PHYS_ADDR_REQ";	break;
234 	default:		pstr = "unsupported";		break;
235 	}
236 	return (pstr);
237 }
238 #else /* DEBUG */
239 #define	DLERRORACK(qq, mm, cc, ee, xx) \
240 			(dlerrorack((qq), (mm), (cc), (ee), (xx)))
241 #define	DLOKACK(qq, mm, cc) \
242 			(dlokack((qq), (mm), (cc)))
243 #define	DLBINDACK(qq, mm, ss, aa, ll, xx, yy) \
244 			(dlbindack((qq), (mm), (ss), (aa), (ll), (xx), (yy)))
245 #define	DLPHYSADDRACK(qq, mm, aa, ll) \
246 			(dlphysaddrack((qq), (mm), (aa), (ll)))
247 #endif /* DEBUG */
248 
249 #define	IDNDL_ADDR_IS_MULTICAST(ap)	(((ap)->ether_addr_octet[0] & 01) == 1)
250 /*
251  * MIB II broadcast/multicast packets
252  */
253 #define	IS_BROADCAST(ehp) \
254 		(ether_cmp(&(ehp)->ether_dhost, &etherbroadcastaddr) == 0)
255 #define	IS_MULTICAST(ehp) \
256 		IDNDL_ADDR_IS_MULTICAST(&(ehp)->ether_dhost)
257 #define	BUMP_InNUcast(sip, ehp)					\
258 		if (IS_BROADCAST(ehp)) {			\
259 			(sip)->si_kstat.si_brdcstrcv++;		\
260 		} else if (IS_MULTICAST(ehp)) {			\
261 			(sip)->si_kstat.si_multircv++;		\
262 		}
263 #define	BUMP_OutNUcast(sip, ehp)				\
264 		if (IS_BROADCAST(ehp)) {			\
265 			(sip)->si_kstat.si_brdcstxmt++;		\
266 		} else if (IS_MULTICAST(ehp)) {			\
267 			(sip)->si_kstat.si_multixmt++;		\
268 		}
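
/*
 * Note: BUMP_InNUcast/BUMP_OutNUcast expand to a bare if/else-if, so
 * they must be used as complete statements, as they are in idndl_read()
 * and idndl_start() below.
 */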
269 
270 /*
271  * Function prototypes.
272  */
273 static int	idndl_ioc_hdr_info(queue_t *, mblk_t *, int *);
274 static void	idndl_areq(queue_t *, mblk_t *);
275 static void	idndl_dreq(queue_t *, mblk_t *);
276 static void	idndl_breq(queue_t *, mblk_t *);
277 static void	idndl_ubreq(queue_t *, mblk_t *);
278 static void	idndl_ireq(queue_t *, mblk_t *);
279 static void	idndl_ponreq(queue_t *, mblk_t *);
280 static void	idndl_poffreq(queue_t *, mblk_t *);
281 static void	idndl_emreq(queue_t *, mblk_t *);
282 static void	idndl_dmreq(queue_t *, mblk_t *);
283 static void	idndl_pareq(queue_t *, mblk_t *);
284 #ifdef notdef
285 static void	idndl_spareq(queue_t *, mblk_t *);
286 #endif /* notdef */
287 static void	idndl_udreq(queue_t *, mblk_t *);
288 static void	serror(dev_info_t *dip, int idnerr, char *fmt, ...);
289 static mblk_t	*idndl_addudind(struct idn *, mblk_t *, struct ether_addr *,
290 				struct ether_addr *, int, ulong_t);
291 static void	idndl_setipq(struct idn *);
292 static int	idndl_mcmatch(struct idnstr *, struct ether_addr *);
293 static int	idndl_stat_kstat_update(kstat_t *ksp, int rw);
294 
295 static int		_idndl_ether2domain(struct ether_addr *eap);
296 static struct idn	*_idndl_ether2sip(struct ether_addr *eap);
297 
298 
299 #define	IDNSAPMATCH(sap, type, flags) ((sap == type)? 1 : \
300 	((flags & IDNSALLSAP)? 1 : \
301 	((sap <= ETHERMTU) && sap && (type <= ETHERMTU))? 1 : 0))
302 
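/*
 * An illustrative, never-compiled sketch of how IDNSAPMATCH is expected
 * to evaluate; the helper name and example values below are assumptions
 * for documentation only, not driver code.
 */
#ifdef notdef
static int
idndl_sapmatch_example(void)
{
	uint_t	flags = 0;

	/* exact SAP match always accepts */
	ASSERT(IDNSAPMATCH(ETHERTYPE_IP, ETHERTYPE_IP, flags) == 1);
	/* mismatched SAPs above ETHERMTU are rejected */
	ASSERT(IDNSAPMATCH(ETHERTYPE_IP, ETHERTYPE_ARP, flags) == 0);
	/* a promiscuous-SAP stream (IDNSALLSAP) accepts everything */
	ASSERT(IDNSAPMATCH(ETHERTYPE_IP, ETHERTYPE_ARP, IDNSALLSAP) == 1);
	/* 802.3 streams (1 <= sap <= ETHERMTU) match 802.3 length types */
	ASSERT(IDNSAPMATCH(100, 46, flags) == 1);
	return (0);
}
#endif /* notdef */
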
303 /*
304  * Our DL_INFO_ACK template.
305  */
306 static	dl_info_ack_t idninfoack = {
307 	DL_INFO_ACK,			/* dl_primitive */
308 	0,				/* dl_max_sdu (see idndl_dlpi_init()) */
309 	0,				/* dl_min_sdu */
310 	IDNADDRL,			/* dl_addr_length */
311 	DL_ETHER, /* DL_OTHER, */	/* dl_mac_type */
312 	0,				/* dl_reserved */
313 	0,				/* dl_current_state */
314 	-2,				/* dl_sap_length */
315 	DL_CLDLS, /* DL_CODLS? */	/* dl_service_mode */
316 	0,				/* dl_qos_length */
317 	0,				/* dl_qos_offset */
318 	0,				/* dl_range_length */
319 	0,				/* dl_range_offset */
320 	DL_STYLE2,			/* dl_provider_style */
321 	sizeof (dl_info_ack_t),		/* dl_addr_offset */
322 	DL_VERSION_2,			/* dl_version */
323 	ETHERADDRL,			/* dl_brdcst_addr_length */
324 	sizeof (dl_info_ack_t) + IDNADDRL,	/* dl_brdcst_addr_offset */
325 	0				/* dl_growth */
326 };
327 
328 /*
329  * Ethernet broadcast address definition.
330  */
331 static struct ether_addr	etherbroadcastaddr = {
332 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
333 };
334 
335 /*
336  * --------------------------------------------------
337  */
338 void
339 idndl_localetheraddr(struct idn *sip, struct ether_addr *eap)
340 {
341 	int		rv;
342 	int		instance;
343 	procname_t	proc = "idndl_localetheraddr";
344 
345 	ASSERT(sip && sip->si_dip && eap);
346 
347 	instance = ddi_get_instance(sip->si_dip);
348 
349 	PR_DLPI("%s: getting local etheraddr...\n", proc);
350 
351 	rv = idndl_domain_etheraddr(idn.localid, instance, eap);
352 	ASSERT(rv == 0);
353 }
354 
355 int
356 idndl_domain_etheraddr(int domid, int channel, struct ether_addr *eap)
357 {
358 	uchar_t		netid;
359 	procname_t	proc = "idndl_domain_etheraddr";
360 
361 	if (idn_domain[domid].dcpu == IDN_NIL_DCPU)
362 		return (-1);
363 
364 	netid = (uchar_t)idn_domain[domid].dnetid;
365 
366 	PR_DLPI("%s: dnetid = 0x%x, channel = 0x%x\n",
367 		proc, (uint_t)netid, channel);
368 
369 #ifdef notdef
370 	localetheraddr(NULL, eap);
371 
372 	PR_DLPI("%s: localetheraddr = %x:%x:%x:%x:%x:%x\n",
373 		proc, eap->ether_addr_octet[0], eap->ether_addr_octet[1],
374 		eap->ether_addr_octet[2], eap->ether_addr_octet[3],
375 		eap->ether_addr_octet[4], eap->ether_addr_octet[5]);
376 #endif /* notdef */
377 
378 	eap->ether_addr_octet[IDNETHER_ZERO] = 0;
379 	eap->ether_addr_octet[IDNETHER_COOKIE1] = IDNETHER_COOKIE1_VAL;
380 	eap->ether_addr_octet[IDNETHER_COOKIE2] = IDNETHER_COOKIE2_VAL;
381 	eap->ether_addr_octet[IDNETHER_NETID] = netid;
382 	eap->ether_addr_octet[IDNETHER_CHANNEL] = (uchar_t)channel;
383 	eap->ether_addr_octet[IDNETHER_RESERVED] = IDNETHER_RESERVED_VAL;
384 
385 	PR_DLPI("%s: domain %d: etheraddr = %x:%x:%x:%x:%x:%x\n",
386 		proc, domid,
387 		eap->ether_addr_octet[0], eap->ether_addr_octet[1],
388 		eap->ether_addr_octet[2], eap->ether_addr_octet[3],
389 		eap->ether_addr_octet[4], eap->ether_addr_octet[5]);
390 
391 	return (0);
392 }
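
/*
 * Illustrative, never-compiled sketch: recovering the netid and channel
 * that idndl_domain_etheraddr() packs into an IDN ethernet address.  The
 * helper name is hypothetical; the byte positions are the IDNETHER_*
 * indices from <sys/idn.h> used above.
 */
#ifdef notdef
static void
idndl_ether_decode_example(struct ether_addr *eap, int *netidp, int *channelp)
{
	*netidp   = (int)eap->ether_addr_octet[IDNETHER_NETID];
	*channelp = (int)eap->ether_addr_octet[IDNETHER_CHANNEL];
}
#endif /* notdef */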
393 
394 #ifdef DEBUG
395 /* Translate an IDN ethernet address into its IDN domain id. */
397 static int
398 _idndl_ether2domain(struct ether_addr *eap)
399 {
400 	uchar_t	*eaop;
401 
402 	eaop = eap->ether_addr_octet;
403 
404 	ASSERT(IDNDL_ADDR_IS_MULTICAST(eap) ||
405 		((eaop[IDNETHER_COOKIE1] == IDNETHER_COOKIE1_VAL) &&
406 		    (eaop[IDNETHER_COOKIE2] == IDNETHER_COOKIE2_VAL)) ||
407 		((eaop[IDNETHER_COOKIE1] == 0xff) &&
408 		    (eaop[IDNETHER_COOKIE2] == 0xff)));
409 	/*
410 	 * Note that IDN_NIL_DOMID will be returned if the ether address
411 	 * is a broadcast (netid 0xff).
412 	 */
413 	return (IDN_NETID2DOMID(eaop[IDNETHER_NETID]));
414 }
415 
416 /* Translate an IDN ethernet address into its idn instance (sip). */
418 static struct idn *
419 _idndl_ether2sip(struct ether_addr *eap)
420 {
421 	int		instance;
422 	struct idn	*sip;
423 	uchar_t		*eaop;
424 	procname_t	proc = "_idndl_ether2sip";
425 
426 	eaop = eap->ether_addr_octet;
427 
428 	if (!IDNDL_ADDR_IS_MULTICAST(eap) &&
429 	    (((eaop[IDNETHER_COOKIE1] != IDNETHER_COOKIE1_VAL) ||
430 	    (eaop[IDNETHER_COOKIE2] != IDNETHER_COOKIE2_VAL)) &&
431 	    ((eaop[IDNETHER_COOKIE1] != 0xff) ||
432 		(eaop[IDNETHER_COOKIE2] != 0xff)))) {
433 
434 		cmn_err(CE_WARN,
435 			"IDN: 400: corrupted MAC header "
436 			"(exp %x or 0xffff, act 0x%x)",
437 			(IDNETHER_COOKIE1_VAL << 8) |
438 				IDNETHER_COOKIE2_VAL,
439 			(eaop[IDNETHER_COOKIE1] << 8) |
440 				eaop[IDNETHER_COOKIE2]);
441 
442 		return (NULL);
443 	}
444 
445 	if (IDNDL_ADDR_IS_MULTICAST(eap)) {
446 		PR_DLPI("%s: MULTICAST ADDR *** ERROR ***\n", proc);
447 		sip = NULL;
448 	} else if (eaop[IDNETHER_CHANNEL] == 0xff) {
449 		/*
450 		 * Received a broadcast.  Need to manually
451 		 * find the first running sip and use it.
452 		 * XXX - kind of kludgy - single threads broadcasts.
453 		 */
454 		PR_DLPI("%s: BROADCAST CHANNEL *** ERROR ***\n", proc);
455 		sip = NULL;
456 	} else {
457 		instance = (int)eaop[IDNETHER_CHANNEL];
458 
459 		sip = IDN_INST2SIP(instance);
460 	}
461 
462 	return (sip);
463 }
464 #endif /* DEBUG */
465 
466 void
467 idndl_dlpi_init()
468 {
469 	procname_t	proc = "idndl_dlpi_init";
470 
471 	PR_DLPI("%s: setting dl_max_sdu to %ld (0x%lx) bytes\n",
472 		proc, IDN_MTU, IDN_MTU);
473 	/*
474 	 * This field is set at runtime because the user may
475 	 * want to change it via ndd(1M) _before_ an IDNnet
476 	 * has been established.
477 	 */
478 	idninfoack.dl_max_sdu = IDN_MTU;
479 }
480 
481 static int
482 idndl_stat_kstat_update(kstat_t *ksp, int rw)
483 {
484 	struct idn	*sip;
485 	struct idn_kstat_named	*skp;
486 
487 	sip = (struct idn *)ksp->ks_private;
488 	skp = (struct idn_kstat_named *)ksp->ks_data;
489 
490 	if (rw == KSTAT_WRITE) {
491 #if 0
492 		bzero(&sg_kstat.gk_kstat, sizeof (sg_kstat.gk_kstat));
493 #endif /* 0 */
494 		bzero(&sip->si_kstat, sizeof (sip->si_kstat));
495 
496 		sip->si_kstat.si_ipackets 	= skp->sk_ipackets.value.ul;
497 		sip->si_kstat.si_ierrors	= skp->sk_ierrors.value.ul;
498 		sip->si_kstat.si_opackets 	= skp->sk_opackets.value.ul;
499 		sip->si_kstat.si_oerrors	= skp->sk_oerrors.value.ul;
500 		sip->si_kstat.si_txcoll		= skp->sk_txcoll.value.ul;
501 		sip->si_kstat.si_rxcoll		= skp->sk_rxcoll.value.ul;
502 		sip->si_kstat.si_crc		= skp->sk_crc.value.ul;
503 		sip->si_kstat.si_buff		= skp->sk_buff.value.ul;
504 		sip->si_kstat.si_nolink		= skp->sk_nolink.value.ul;
505 		sip->si_kstat.si_linkdown	= skp->sk_linkdown.value.ul;
506 		sip->si_kstat.si_inits		= skp->sk_inits.value.ul;
507 		sip->si_kstat.si_nocanput	= skp->sk_nocanput.value.ul;
508 		sip->si_kstat.si_allocbfail	= skp->sk_allocbfail.value.ul;
509 		sip->si_kstat.si_notbufs	= skp->sk_notbufs.value.ul;
510 		sip->si_kstat.si_reclaim	= skp->sk_reclaim.value.ul;
511 		sip->si_kstat.si_smraddr	= skp->sk_smraddr.value.ul;
512 		sip->si_kstat.si_txmax		= skp->sk_txmax.value.ul;
513 		sip->si_kstat.si_txfull		= skp->sk_txfull.value.ul;
514 		sip->si_kstat.si_xdcall		= skp->sk_xdcall.value.ul;
515 		sip->si_kstat.si_sigsvr		= skp->sk_sigsvr.value.ul;
516 		sip->si_kstat.si_mboxcrc	= skp->sk_mboxcrc.value.ul;
517 		/*
518 		 * MIB II kstat variables
519 		 */
520 		sip->si_kstat.si_rcvbytes	= skp->sk_rcvbytes.value.ul;
521 		sip->si_kstat.si_xmtbytes	= skp->sk_xmtbytes.value.ul;
522 		sip->si_kstat.si_multircv	= skp->sk_multircv.value.ul;
523 		sip->si_kstat.si_multixmt	= skp->sk_multixmt.value.ul;
524 		sip->si_kstat.si_brdcstrcv	= skp->sk_brdcstrcv.value.ul;
525 		sip->si_kstat.si_brdcstxmt	= skp->sk_brdcstxmt.value.ul;
526 		sip->si_kstat.si_norcvbuf	= skp->sk_norcvbuf.value.ul;
527 		sip->si_kstat.si_noxmtbuf	= skp->sk_noxmtbuf.value.ul;
528 		/*
529 		 * PSARC 1997/198 : 64bit kstats
530 		 */
531 		sip->si_kstat.si_ipackets64	= skp->sk_ipackets64.value.ull;
532 		sip->si_kstat.si_opackets64	= skp->sk_opackets64.value.ull;
533 		sip->si_kstat.si_rbytes64	= skp->sk_rbytes64.value.ull;
534 		sip->si_kstat.si_obytes64	= skp->sk_obytes64.value.ull;
535 		/*
536 		 * PSARC 1997/247 : RFC 1643
537 		 */
538 		sip->si_kstat.si_fcs_errors	= skp->sk_fcs_errors.value.ul;
539 		sip->si_kstat.si_macxmt_errors	=
540 						skp->sk_macxmt_errors.value.ul;
541 		sip->si_kstat.si_toolong_errors	=
542 						skp->sk_toolong_errors.value.ul;
543 		sip->si_kstat.si_macrcv_errors	=
544 						skp->sk_macrcv_errors.value.ul;
545 
546 		return (0);
547 	}
548 
549 	skp->sk_ipackets.value.ul 	= sip->si_kstat.si_ipackets;
550 	skp->sk_ierrors.value.ul	= sip->si_kstat.si_ierrors;
551 	skp->sk_opackets.value.ul	= sip->si_kstat.si_opackets;
552 	skp->sk_oerrors.value.ul	= sip->si_kstat.si_oerrors;
553 	skp->sk_txcoll.value.ul		= sip->si_kstat.si_txcoll;
554 	skp->sk_rxcoll.value.ul		= sip->si_kstat.si_rxcoll;
555 	skp->sk_crc.value.ul		= sip->si_kstat.si_crc;
556 	skp->sk_buff.value.ul		= sip->si_kstat.si_buff;
557 	skp->sk_nolink.value.ul		= sip->si_kstat.si_nolink;
558 	skp->sk_linkdown.value.ul	= sip->si_kstat.si_linkdown;
559 	skp->sk_inits.value.ul		= sip->si_kstat.si_inits;
560 	skp->sk_nocanput.value.ul	= sip->si_kstat.si_nocanput;
561 	skp->sk_allocbfail.value.ul	= sip->si_kstat.si_allocbfail;
562 	skp->sk_notbufs.value.ul	= sip->si_kstat.si_notbufs;
563 	skp->sk_reclaim.value.ul	= sip->si_kstat.si_reclaim;
564 	skp->sk_smraddr.value.ul	= sip->si_kstat.si_smraddr;
565 	skp->sk_txfull.value.ul		= sip->si_kstat.si_txfull;
566 	skp->sk_txmax.value.ul		= sip->si_kstat.si_txmax;
567 	skp->sk_xdcall.value.ul		= sip->si_kstat.si_xdcall;
568 	skp->sk_sigsvr.value.ul		= sip->si_kstat.si_sigsvr;
569 	skp->sk_mboxcrc.value.ul	= sip->si_kstat.si_mboxcrc;
570 	/*
571 	 * MIB II kstat variables
572 	 */
573 	skp->sk_rcvbytes.value.ul	= sip->si_kstat.si_rcvbytes;
574 	skp->sk_xmtbytes.value.ul	= sip->si_kstat.si_xmtbytes;
575 	skp->sk_multircv.value.ul	= sip->si_kstat.si_multircv;
576 	skp->sk_multixmt.value.ul	= sip->si_kstat.si_multixmt;
577 	skp->sk_brdcstrcv.value.ul	= sip->si_kstat.si_brdcstrcv;
578 	skp->sk_brdcstxmt.value.ul	= sip->si_kstat.si_brdcstxmt;
579 	skp->sk_norcvbuf.value.ul	= sip->si_kstat.si_norcvbuf;
580 	skp->sk_noxmtbuf.value.ul	= sip->si_kstat.si_noxmtbuf;
581 	/*
582 	 * PSARC 1997/198 : 64bit kstats
583 	 */
584 	skp->sk_ipackets64.value.ull	= sip->si_kstat.si_ipackets64;
585 	skp->sk_opackets64.value.ull	= sip->si_kstat.si_opackets64;
586 	skp->sk_rbytes64.value.ull	= sip->si_kstat.si_rbytes64;
587 	skp->sk_obytes64.value.ull	= sip->si_kstat.si_obytes64;
588 	/*
589 	 * PSARC 1997/247 : RFC 1643
590 	 */
591 	skp->sk_fcs_errors.value.ul	= sip->si_kstat.si_fcs_errors;
592 	skp->sk_macxmt_errors.value.ul	= sip->si_kstat.si_macxmt_errors;
593 	skp->sk_toolong_errors.value.ul	= sip->si_kstat.si_toolong_errors;
594 	skp->sk_macrcv_errors.value.ul	= sip->si_kstat.si_macrcv_errors;
595 
596 	return (0);
597 }
598 
599 void
600 idndl_statinit(struct idn *sip)
601 {
602 	struct	kstat		*ksp;
603 	struct	idn_kstat_named	*skp;
604 
605 #ifdef	kstat
606 	if ((ksp = kstat_create(IDNNAME, ddi_get_instance(sip->si_dip),
607 		NULL, "net", KSTAT_TYPE_NAMED,
608 		sizeof (struct idn_kstat_named) / sizeof (kstat_named_t),
609 		KSTAT_FLAG_PERSISTENT)) == NULL) {
610 #else
611 	if ((ksp = kstat_create(IDNNAME, ddi_get_instance(sip->si_dip),
612 		NULL, "net", KSTAT_TYPE_NAMED,
613 		sizeof (struct idn_kstat_named) /
614 		sizeof (kstat_named_t), 0)) == NULL) {
615 #endif	/* kstat */
616 		serror(sip->si_dip, 450, "kstat_create failed");
617 		return;
618 	}
619 
620 	sip->si_ksp = ksp;
621 	skp = (struct idn_kstat_named *)(ksp->ks_data);
622 	kstat_named_init(&skp->sk_ipackets,		"ipackets",
623 		KSTAT_DATA_ULONG);
624 	kstat_named_init(&skp->sk_ierrors,		"ierrors",
625 		KSTAT_DATA_ULONG);
626 	kstat_named_init(&skp->sk_opackets,		"opackets",
627 		KSTAT_DATA_ULONG);
628 	kstat_named_init(&skp->sk_oerrors,		"oerrors",
629 		KSTAT_DATA_ULONG);
630 	kstat_named_init(&skp->sk_txcoll,		"collisions",
631 		KSTAT_DATA_ULONG);
632 	kstat_named_init(&skp->sk_rxcoll,		"rx_collisions",
633 		KSTAT_DATA_ULONG);
634 	kstat_named_init(&skp->sk_crc,			"crc",
635 		KSTAT_DATA_ULONG);
636 	kstat_named_init(&skp->sk_buff,			"buff",
637 		KSTAT_DATA_ULONG);
638 	kstat_named_init(&skp->sk_nolink,		"nolink",
639 		KSTAT_DATA_ULONG);
640 	kstat_named_init(&skp->sk_linkdown,		"linkdown",
641 		KSTAT_DATA_ULONG);
642 	kstat_named_init(&skp->sk_inits,		"inits",
643 		KSTAT_DATA_ULONG);
644 	kstat_named_init(&skp->sk_nocanput,		"nocanput",
645 		KSTAT_DATA_ULONG);
646 	kstat_named_init(&skp->sk_allocbfail,		"allocbfail",
647 		KSTAT_DATA_ULONG);
648 	kstat_named_init(&skp->sk_notbufs,		"notbufs",
649 		KSTAT_DATA_ULONG);
650 	kstat_named_init(&skp->sk_reclaim,		"reclaim",
651 		KSTAT_DATA_ULONG);
652 	kstat_named_init(&skp->sk_smraddr,		"smraddr",
653 		KSTAT_DATA_ULONG);
654 	kstat_named_init(&skp->sk_txmax,		"txmax",
655 		KSTAT_DATA_ULONG);
656 	kstat_named_init(&skp->sk_txfull,		"txfull",
657 		KSTAT_DATA_ULONG);
658 	kstat_named_init(&skp->sk_xdcall,		"xdcall",
659 		KSTAT_DATA_ULONG);
660 	kstat_named_init(&skp->sk_sigsvr,		"sigsvr",
661 		KSTAT_DATA_ULONG);
662 	kstat_named_init(&skp->sk_mboxcrc,		"mboxcrc",
663 		KSTAT_DATA_ULONG);
664 	/*
665 	 * MIB II kstat variables
666 	 */
667 	kstat_named_init(&skp->sk_rcvbytes,		"rbytes",
668 		KSTAT_DATA_ULONG);
669 	kstat_named_init(&skp->sk_xmtbytes,		"obytes",
670 		KSTAT_DATA_ULONG);
671 	kstat_named_init(&skp->sk_multircv,		"multircv",
672 		KSTAT_DATA_ULONG);
673 	kstat_named_init(&skp->sk_multixmt,		"multixmt",
674 		KSTAT_DATA_ULONG);
675 	kstat_named_init(&skp->sk_brdcstrcv,		"brdcstrcv",
676 		KSTAT_DATA_ULONG);
677 	kstat_named_init(&skp->sk_brdcstxmt,		"brdcstxmt",
678 		KSTAT_DATA_ULONG);
679 	kstat_named_init(&skp->sk_norcvbuf,		"norcvbuf",
680 		KSTAT_DATA_ULONG);
681 	kstat_named_init(&skp->sk_noxmtbuf,		"noxmtbuf",
682 		KSTAT_DATA_ULONG);
683 	/*
684 	 * PSARC 1997/198 : 64bit kstats
685 	 */
686 	kstat_named_init(&skp->sk_ipackets64,		"ipackets64",
687 		KSTAT_DATA_ULONGLONG);
688 	kstat_named_init(&skp->sk_opackets64,		"opackets64",
689 		KSTAT_DATA_ULONGLONG);
690 	kstat_named_init(&skp->sk_rbytes64,		"rbytes64",
691 		KSTAT_DATA_ULONGLONG);
692 	kstat_named_init(&skp->sk_obytes64,		"obytes64",
693 		KSTAT_DATA_ULONGLONG);
694 	/*
695 	 * PSARC 1997/247 : RFC 1643
696 	 */
697 	kstat_named_init(&skp->sk_fcs_errors,		"fcs_errors",
698 		KSTAT_DATA_ULONG);
699 	kstat_named_init(&skp->sk_macxmt_errors,	"macxmt_errors",
700 		KSTAT_DATA_ULONG);
701 	kstat_named_init(&skp->sk_toolong_errors,	"toolong_errors",
702 		KSTAT_DATA_ULONG);
703 	kstat_named_init(&skp->sk_macrcv_errors,	"macrcv_errors",
704 		KSTAT_DATA_ULONG);
705 
706 	ksp->ks_update = idndl_stat_kstat_update;
707 	ksp->ks_private = (void *)sip;
708 	kstat_install(ksp);
709 }
710 
711 void
712 idndl_proto(queue_t *wq, mblk_t *mp)
713 {
714 	union DL_primitives	*dlp;
715 	struct idnstr		*stp;
716 	t_uscalar_t		prim;
717 	procname_t		proc = "idndl_proto";
718 
719 	stp = (struct idnstr *)wq->q_ptr;
720 	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
721 		/*
722 		 * Gotta at least have enough room to hold
723 		 * the primitive!
724 		 */
725 		DLERRORACK(wq, mp, -1, DL_BADPRIM, 0);
726 		return;
727 	}
728 	dlp = (union DL_primitives *)mp->b_rptr;
729 	prim = dlp->dl_primitive;
730 
731 	TRACE_2(TR_FAC_IDN, TR_IDN_PROTO_START,
732 		"idndl_proto start:  wq %p dlprim %X", wq, prim);
733 
734 #ifdef DEBUG
735 	PR_DLPI("%s: stp = 0x%p, wq = 0x%p, dlprim = 0x%x(%s)\n",
736 		proc, stp, wq, prim, dlprim2str(prim));
737 #endif /* DEBUG */
738 
739 	rw_enter(&stp->ss_rwlock, RW_WRITER);
740 
741 	switch (prim) {
742 	case DL_UNITDATA_REQ:
743 		idndl_udreq(wq, mp);
744 		break;
745 
746 	case DL_ATTACH_REQ:
747 		idndl_areq(wq, mp);
748 		break;
749 
750 	case DL_DETACH_REQ:
751 		idndl_dreq(wq, mp);
752 		break;
753 
754 	case DL_BIND_REQ:
755 		idndl_breq(wq, mp);
756 		break;
757 
758 	case DL_UNBIND_REQ:
759 		idndl_ubreq(wq, mp);
760 		break;
761 
762 	case DL_INFO_REQ:
763 		idndl_ireq(wq, mp);
764 		break;
765 
766 	case DL_PROMISCON_REQ:
767 		idndl_ponreq(wq, mp);
768 		break;
769 
770 	case DL_PROMISCOFF_REQ:
771 		idndl_poffreq(wq, mp);
772 		break;
773 
774 	case DL_ENABMULTI_REQ:
775 		idndl_emreq(wq, mp);
776 		break;
777 
778 	case DL_DISABMULTI_REQ:
779 		idndl_dmreq(wq, mp);
780 		break;
781 
782 	case DL_PHYS_ADDR_REQ:
783 		idndl_pareq(wq, mp);
784 		break;
785 
786 #ifdef notdef
787 	/*
788 	 * We cannot allow this in IDN-land because we
789 	 * rely on the ethernet (physical) address to determine
790 	 * where to target the message.  Recall that unlike
791 	 * ethernet we simply cannot dump junk on the wire and
792 	 * expect it to automatically find its destination.
793 	 * In the IDN we need to target the destination.
794 	 * Note that if we used POINT-TO-POINT then we wouldn't
795 	 * have to worry about the physical address since each
796 	 * domain connection would have a separate queue.
797 	 * However, ptp then requires multiple interfaces at
798 	 * the appl level as opposed to a single one for all
799 	 * of idn.  We opt for the simpler single interface (idn0).
800 	 */
801 	case DL_SET_PHYS_ADDR_REQ:
802 		idndl_spareq(wq, mp);
803 		break;
804 #endif /* notdef */
805 
806 	default:
807 		DLERRORACK(wq, mp, prim, DL_UNSUPPORTED, 0);
808 		break;
809 	}
810 
811 	TRACE_2(TR_FAC_IDN, TR_IDN_PROTO_END,
812 		"idnproto end:  wq %p dlprim %X", wq, prim);
813 
814 	rw_exit(&stp->ss_rwlock);
815 }
816 
817 int
818 idnioc_dlpi(queue_t *wq, mblk_t *mp, int *argsizep)
819 {
820 	int	rv = 0;
821 	struct	iocblk	*iocp = (struct iocblk *)mp->b_rptr;
822 	struct	idnstr	*stp  = (struct idnstr *)wq->q_ptr;
823 	procname_t	proc = "idnioc_dlpi";
824 
825 	*argsizep = 0;
826 
827 	switch (iocp->ioc_cmd) {
828 	case DLIOCRAW:			/* raw M_DATA mode */
829 		PR_DLPI("%s: cmd = DLIOCRAW\n", proc);
830 		stp->ss_flags |= IDNSRAW;
831 		break;
832 
833 	case DL_IOC_HDR_INFO:		/* M_DATA "fastpath" info request */
834 		PR_DLPI("%s: cmd = DL_IOC_HDR_INFO\n", proc);
835 		rv = idndl_ioc_hdr_info(wq, mp, argsizep);
836 		break;
837 
838 	default:
839 		PR_DLPI("%s: invalid cmd 0x%x\n", proc, iocp->ioc_cmd);
840 		rv = EINVAL;
841 		break;
842 	}
843 	return (rv);
844 }
845 
846 /*
847  * M_DATA "fastpath" info request.
848  * Following the M_IOCTL mblk should come a DL_UNITDATA_REQ mblk.
849  * We ack with an M_IOCACK pointing to the original DL_UNITDATA_REQ mblk
850  * followed by an mblk containing the raw ethernet header corresponding
851  * to the destination address.  Subsequently, we may receive M_DATA
852  * msgs which start with this header and may send up
853  * msgs which start with this header and may send up M_DATA
854  * msgs with b_rptr pointing to a (ulong) group address
855  * This is all selectable on a per-Stream basis.
856  */
857 static int
858 idndl_ioc_hdr_info(queue_t *wq, mblk_t *mp, int *argsizep)
859 {
860 	mblk_t			*nmp;
861 	struct idnstr		*stp;
862 	struct idndladdr	*dlap;
863 	dl_unitdata_req_t	*dludp;
864 	struct ether_header	*headerp;
865 	struct idn		*sip;
866 	int	off, len;
867 	int	padding = 0;
868 	int	error;
869 	procname_t		proc = "idndl_ioc_hdr_info";
870 
871 	stp = (struct idnstr *)wq->q_ptr;
872 	sip = stp->ss_sip;
873 	if (sip == NULL) {
874 		PR_DLPI("%s: NULL sip (ret EINVAL)\n", proc);
875 		return (EINVAL);
876 	}
877 
878 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + IDNADDRL);
879 	if (error != 0) {
880 		PR_DLPI("%s: sanity error (ret %d)\n", proc, error);
881 		return (error);
882 	}
883 
884 	/*
885 	 * Sanity check the DL_UNITDATA_REQ destination address
886 	 * offset and length values.
887 	 */
888 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
889 	off = dludp->dl_dest_addr_offset;
890 	len = dludp->dl_dest_addr_length;
891 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
892 	    !MBLKIN(mp->b_cont, off, len) || len != IDNADDRL) {
893 		PR_DLPI("%s: off(0x%x)/len(%d) error (ret EINVAL)\n",
894 		    proc, off, len);
895 		return (EINVAL);
896 	}
897 
898 	dlap = (struct idndladdr *)(mp->b_cont->b_rptr + off);
899 
900 	/*
901 	 * Allocate a new mblk to hold the ether header.
902 	 */
903 	nmp = allocb(sizeof (struct ether_header) + padding, BPRI_MED);
904 	if (nmp == NULL) {
905 		IDN_KSTAT_INC(sip, si_allocbfail);
906 		return (ENOMEM);
907 	}
908 	nmp->b_rptr += padding;
909 	nmp->b_wptr = nmp->b_rptr + sizeof (struct ether_header);
910 
911 	/*
912 	 * Fill in the ether header.
913 	 */
914 	headerp = (struct ether_header *)nmp->b_rptr;
915 	ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
916 	ether_copy(&sip->si_ouraddr, &headerp->ether_shost);
917 	headerp->ether_type = dlap->dl_sap;
918 
919 	/*
920 	 * Link new mblk in after the "request" mblks.
921 	 */
922 	linkb(mp, nmp);
923 
924 	stp->ss_flags |= IDNSFAST;
925 
926 	/*
927 	 * XXX Don't bother calling idndl_setipq() here.
928 	 */
929 
930 	if (argsizep)
931 		*argsizep = msgsize(mp->b_cont);
932 
933 	return (0);
934 }
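
/*
 * A sketch of the message chain built above (the M_IOCTL itself is acked
 * by the ioctl handler that called idnioc_dlpi()):
 *
 *	M_IOCTL mblk (acked upstream)
 *	    b_cont -> DL_UNITDATA_REQ mblk (the original request)
 *	        b_cont -> M_DATA mblk holding the prebuilt ether_header
 *
 * The upper layer saves the trailing header mblk and subsequently sends
 * raw M_DATA messages that begin with that header ("fastpath").
 */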
935 
936 static void
937 idndl_areq(queue_t *wq, mblk_t *mp)
938 {
939 	struct idnstr		*stp;
940 	union DL_primitives	*dlp;
941 	struct idn		*sip;
942 	int	ppa;
943 	procname_t	proc = "idndl_areq";
944 
945 	stp = (struct idnstr *)wq->q_ptr;
946 	dlp = (union DL_primitives *)mp->b_rptr;
947 
948 	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
949 		DLERRORACK(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);
950 		return;
951 	}
952 
953 	if (stp->ss_state != DL_UNATTACHED) {
954 		DLERRORACK(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);
955 		return;
956 	}
957 
958 	ppa = dlp->attach_req.dl_ppa;
959 
960 	/*
961 	 * Valid ppa?
962 	 */
963 	if (ppa == -1 || qassociate(wq, ppa) != 0) {
964 		PR_DLPI("%s: bad ppa %d\n", proc, ppa);
965 		DLERRORACK(wq, mp, dlp->dl_primitive, DL_BADPPA, 0);
966 		return;
967 	}
968 	mutex_enter(&idn.siplock);
969 	for (sip = idn.sip; sip; sip = sip->si_nextp) {
970 		if (ppa == ddi_get_instance(sip->si_dip))
971 			break;
972 	}
973 	mutex_exit(&idn.siplock);
974 	ASSERT(sip != NULL);	/* qassociate() succeeded */
975 
976 	/*
977 	 * Has device been initialized?  Do so if necessary.
978 	 */
979 	if ((sip->si_flags & IDNRUNNING) == 0) {
980 		if (idndl_init(sip)) {
981 			DLERRORACK(wq, mp, dlp->dl_primitive,
982 					DL_INITFAILED, 0);
983 			(void) qassociate(wq, -1);
984 			return;
985 		}
986 	}
987 
988 	/*
989 	 * Set link to device and update our state.
990 	 */
991 	stp->ss_sip = sip;
992 	stp->ss_state = DL_UNBOUND;
993 
994 	DLOKACK(wq, mp, DL_ATTACH_REQ);
995 }
996 
997 static void
998 idndl_dreq(queue_t *wq, mblk_t *mp)
999 {
1000 	struct idnstr	*stp;
1001 
1002 	stp = (struct idnstr *)wq->q_ptr;
1003 
1004 	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
1005 		DLERRORACK(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
1006 		return;
1007 	}
1008 
1009 	if (stp->ss_state != DL_UNBOUND) {
1010 		DLERRORACK(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
1011 		return;
1012 	}
1013 
1014 	idndl_dodetach(stp);
1015 	(void) qassociate(wq, -1);
1016 	DLOKACK(wq, mp, DL_DETACH_REQ);
1017 }
1018 
1019 /*
1020  * Detach a Stream from an interface.
1021  */
1022 void
1023 idndl_dodetach(struct idnstr *stp)
1024 {
1025 	struct idnstr	*tstp;
1026 	struct idn	*sip;
1027 	int		reinit = 0;
1028 
1029 	ASSERT(stp->ss_sip);
1030 
1031 	sip = stp->ss_sip;
1032 	stp->ss_sip = NULL;
1033 
1034 	/*
1035 	 * Disable promiscuous mode if on.
1036 	 */
1037 	if (stp->ss_flags & IDNSALLPHYS) {
1038 		stp->ss_flags &= ~IDNSALLPHYS;
1039 		reinit = 1;
1040 	}
1041 
1042 	/*
1043 	 * Disable ALLMULTI mode if on.
1044 	 */
1045 	if (stp->ss_flags & IDNSALLMULTI) {
1046 		stp->ss_flags &= ~IDNSALLMULTI;
1047 		reinit = 1;
1048 	}
1049 
1050 	/*
1051 	 * Disable any Multicast Addresses.
1052 	 */
1053 	stp->ss_mccount = 0;
1054 	if (stp->ss_mctab) {
1055 		kmem_free(stp->ss_mctab, IDNMCALLOC);
1056 		stp->ss_mctab = NULL;
1057 		reinit = 1;
1058 	}
1059 
1060 	/*
1061 	 * Detach from device structure.
1062 	 * Uninit the device when no other streams are attached to it.
1063 	 */
1064 	rw_enter(&idn.struprwlock, RW_READER);
1065 	for (tstp = idn.strup; tstp; tstp = tstp->ss_nextp)
1066 		if (tstp->ss_sip == sip)
1067 			break;
1068 	rw_exit(&idn.struprwlock);
1069 
1070 	if (tstp == NULL)
1071 		idndl_uninit(sip);
1072 	else if (reinit)
1073 		(void) idndl_init(sip);
1074 
1075 	stp->ss_state = DL_UNATTACHED;
1076 
1077 	idndl_setipq(sip);
1078 }
1079 
1080 static void
1081 idndl_breq(queue_t *wq, mblk_t *mp)
1082 {
1083 	struct idnstr		*stp;
1084 	union DL_primitives	*dlp;
1085 	struct idn		*sip;
1086 	struct idndladdr	idnaddr;
1087 	t_uscalar_t		sap;
1088 	int		xidtest;
1089 	procname_t	proc = "idndl_breq";
1090 
1091 	stp = (struct idnstr *)wq->q_ptr;
1092 
1093 	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
1094 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
1095 		return;
1096 	}
1097 
1098 	if (stp->ss_state != DL_UNBOUND) {
1099 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
1100 		return;
1101 	}
1102 
1103 	dlp = (union DL_primitives *)mp->b_rptr;
1104 
1105 	if (dlp->bind_req.dl_service_mode != idninfoack.dl_service_mode) {
1106 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_UNSUPPORTED, 0);
1107 		return;
1108 	}
1109 
1110 	sip = stp->ss_sip;
1111 	sap = dlp->bind_req.dl_sap;
1112 	xidtest = dlp->bind_req.dl_xidtest_flg;
1113 
1114 	ASSERT(sip);
1115 
1116 	if (xidtest) {
1117 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
1118 		return;
1119 	}
1120 
1121 	if (sap > ETHERTYPE_MAX) {
1122 		DLERRORACK(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);
1123 		return;
1124 	}
1125 
1126 	/*
1127 	 * Save SAP value for this Stream and change state.
1128 	 */
1129 	stp->ss_sap = sap;
1130 	stp->ss_state = DL_IDLE;
1131 
1132 	idnaddr.dl_sap = sap;
1133 	ether_copy(&sip->si_ouraddr, &idnaddr.dl_phys);
1134 
1135 	if (IS_ETHERTYPE_IP(sap)) {
1136 		int	channel;
1137 
1138 		channel =
1139 			(int)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1140 		PR_DLPI("%s: IP SAP, opening channel %d\n", proc, channel);
1141 		if (idn_open_channel(channel)) {
1142 			PR_DLPI("%s: FAILED TO OPEN CHANNEL %d\n",
1143 				proc, channel);
1144 			DLERRORACK(wq, mp, dlp->dl_primitive, DL_NOADDR, 0);
1145 			return;
1146 		}
1147 	}
1148 	DLBINDACK(wq, mp, sap, &idnaddr, IDNADDRL, 0, 0);
1149 
1150 	idndl_setipq(sip);
1151 }
1152 
1153 static void
1154 idndl_ubreq(queue_t *wq, mblk_t *mp)
1155 {
1156 	struct idnstr	*stp;
1157 	procname_t	proc = "idndl_ubreq";
1158 
1159 	stp = (struct idnstr *)wq->q_ptr;
1160 
1161 	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
1162 		DLERRORACK(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
1163 		return;
1164 	}
1165 
1166 	if (stp->ss_state != DL_IDLE) {
1167 		DLERRORACK(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
1168 		return;
1169 	}
1170 
1171 	stp->ss_state = DL_UNBOUND;
1172 
1173 	if (IS_ETHERTYPE_IP(stp->ss_sap)) {
1174 		struct idn	*sip;
1175 		int		channel;
1176 
1177 		sip = stp->ss_sip;
1178 		channel =
1179 			(int)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1180 		PR_DLPI("%s: IP SAP, unbinding channel %d\n", proc, channel);
1181 		/*
1182 		 * We need to do a "soft" close since there's a
1183 		 * potential that we've been called by one of the
1184 		 * IDN data server/dispatcher threads!  We'll deadlock
1185 		 * if we attempt a "hard" close of the channel from here.
1186 		 */
1187 		idn_close_channel(channel, IDNCHAN_SOFT_CLOSE);
1188 	}
1189 
1190 	stp->ss_sap = 0;
1191 
1192 	DLOKACK(wq, mp, DL_UNBIND_REQ);
1193 
1194 	idndl_setipq(stp->ss_sip);
1195 }
1196 
1197 static void
1198 idndl_ireq(queue_t *wq, mblk_t *mp)
1199 {
1200 	struct idnstr		*stp;
1201 	dl_info_ack_t		*dlip;
1202 	struct idndladdr	*dlap;
1203 	struct ether_addr	*ep;
1204 	int	size;
1205 
1206 	stp = (struct idnstr *)wq->q_ptr;
1207 
1208 	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
1209 		DLERRORACK(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
1210 		return;
1211 	}
1212 
1213 	/*
1214 	 * Exchange current msg for a DL_INFO_ACK.
1215 	 */
1216 	size = sizeof (dl_info_ack_t) + IDNADDRL + ETHERADDRL;
1217 	if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL)
1218 		return;
1219 
1220 	/*
1221 	 * Fill in the DL_INFO_ACK fields and reply.
1222 	 */
1223 	dlip = (dl_info_ack_t *)mp->b_rptr;
1224 	ASSERT(idninfoack.dl_max_sdu);
1225 	*dlip = idninfoack;
1226 	dlip->dl_current_state = stp->ss_state;
1227 	dlap = (struct idndladdr *)(mp->b_rptr + dlip->dl_addr_offset);
1228 	dlap->dl_sap = stp->ss_sap;
1229 	if (stp->ss_sip) {
1230 		ether_copy(&stp->ss_sip->si_ouraddr, &dlap->dl_phys);
1231 	} else {
1232 		bzero(&dlap->dl_phys, ETHERADDRL);
1233 	}
1234 	ep = (struct ether_addr *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
1235 	ether_copy(&etherbroadcastaddr, ep);
1236 
1237 	qreply(wq, mp);
1238 }
1239 
1240 static void
1241 idndl_ponreq(queue_t *wq, mblk_t *mp)
1242 {
1243 	struct idnstr	*stp;
1244 
1245 	stp = (struct idnstr *)wq->q_ptr;
1246 
1247 	if (MBLKL(mp) < DL_PROMISCON_REQ_SIZE) {
1248 		DLERRORACK(wq, mp, DL_PROMISCON_REQ, DL_BADPRIM, 0);
1249 		return;
1250 	}
1251 
1252 	switch (((dl_promiscon_req_t *)mp->b_rptr)->dl_level) {
1253 	case DL_PROMISC_PHYS:
1254 		stp->ss_flags |= IDNSALLPHYS;
1255 		break;
1256 
1257 	case DL_PROMISC_SAP:
1258 		stp->ss_flags |= IDNSALLSAP;
1259 		break;
1260 
1261 	case DL_PROMISC_MULTI:
1262 		stp->ss_flags |= IDNSALLMULTI;
1263 		break;
1264 
1265 	default:
1266 		DLERRORACK(wq, mp, DL_PROMISCON_REQ, DL_NOTSUPPORTED, 0);
1267 		return;
1268 	}
1269 
1270 	if (stp->ss_sip)
1271 		(void) idndl_init(stp->ss_sip);
1272 
1273 	if (stp->ss_sip)
1274 		idndl_setipq(stp->ss_sip);
1275 
1276 	DLOKACK(wq, mp, DL_PROMISCON_REQ);
1277 }
1278 
1279 static void
1280 idndl_poffreq(queue_t *wq, mblk_t *mp)
1281 {
1282 	struct idnstr	*stp;
1283 	int		flag;
1284 
1285 	stp = (struct idnstr *)wq->q_ptr;
1286 
1287 	if (MBLKL(mp) < DL_PROMISCOFF_REQ_SIZE) {
1288 		DLERRORACK(wq, mp, DL_PROMISCOFF_REQ, DL_BADPRIM, 0);
1289 		return;
1290 	}
1291 
1292 	switch (((dl_promiscoff_req_t *)mp->b_rptr)->dl_level) {
1293 	case DL_PROMISC_PHYS:
1294 		flag = IDNSALLPHYS;
1295 		break;
1296 
1297 	case DL_PROMISC_SAP:
1298 		flag = IDNSALLSAP;
1299 		break;
1300 
1301 	case DL_PROMISC_MULTI:
1302 		flag = IDNSALLMULTI;
1303 		break;
1304 
1305 	default:
1306 		DLERRORACK(wq, mp, DL_PROMISCOFF_REQ, DL_NOTSUPPORTED, 0);
1307 		return;
1308 	}
1309 
1310 	if ((stp->ss_flags & flag) == 0) {
1311 		DLERRORACK(wq, mp, DL_PROMISCOFF_REQ, DL_NOTENAB, 0);
1312 		return;
1313 	}
1314 
1315 	stp->ss_flags &= ~flag;
1316 
1317 	if (stp->ss_sip)
1318 		(void) idndl_init(stp->ss_sip);
1319 
1320 	if (stp->ss_sip)
1321 		idndl_setipq(stp->ss_sip);
1322 
1323 	DLOKACK(wq, mp, DL_PROMISCOFF_REQ);
1324 }
1325 
1326 static void
1327 idndl_emreq(queue_t *wq, mblk_t *mp)
1328 {
1329 	struct idnstr		*stp;
1330 	union DL_primitives	*dlp;
1331 	struct ether_addr	*addrp;
1332 	int	off;
1333 	int	len;
1334 	int	i;
1335 
1336 	stp = (struct idnstr *)wq->q_ptr;
1337 
1338 	if (MBLKL(mp) < DL_ENABMULTI_REQ_SIZE) {
1339 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_BADPRIM, 0);
1340 		return;
1341 	}
1342 
1343 	if (stp->ss_state == DL_UNATTACHED) {
1344 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_OUTSTATE, 0);
1345 		return;
1346 	}
1347 
1348 	dlp = (union DL_primitives *)mp->b_rptr;
1349 	len = dlp->enabmulti_req.dl_addr_length;
1350 	off = dlp->enabmulti_req.dl_addr_offset;
1351 	addrp = (struct ether_addr *)(mp->b_rptr + off);
1352 
1353 	if ((len != ETHERADDRL) ||
1354 		!MBLKIN(mp, off, len) ||
1355 		!IDNDL_ADDR_IS_MULTICAST(addrp)) {
1356 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_BADADDR, 0);
1357 		return;
1358 	}
1359 
1360 	if ((stp->ss_mccount + 1) >= IDNMAXMC) {
1361 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_TOOMANY, 0);
1362 		return;
1363 	}
1364 
1365 	/*
1366 	 * Allocate table on first request.
1367 	 */
1368 	if (stp->ss_mctab == NULL)
1369 		stp->ss_mctab = kmem_alloc(IDNMCALLOC, KM_SLEEP);
1370 
1371 	/*
1372 	 * Check to see if the address is already in the table.
1373 	 * Bug 1209733:
1374 	 * If present in the table, add the entry to the end of the table
1375 	 * and return without initializing the hardware.
1376 	 */
1377 	for (i = 0; i < stp->ss_mccount; i++) {
1378 		if (ether_cmp(&stp->ss_mctab[i], addrp) == 0) {
1379 			stp->ss_mctab[stp->ss_mccount++] = *addrp;
1380 			DLOKACK(wq, mp, DL_ENABMULTI_REQ);
1381 			return;
1382 		}
1383 	}
1384 
1385 	stp->ss_mctab[stp->ss_mccount++] = *addrp;
1386 
1387 	(void) idndl_init(stp->ss_sip);
1388 
1389 	DLOKACK(wq, mp, DL_ENABMULTI_REQ);
1390 }
1391 
1392 static void
1393 idndl_dmreq(queue_t *wq, mblk_t *mp)
1394 {
1395 	struct idnstr		*stp;
1396 	union DL_primitives	*dlp;
1397 	struct ether_addr	*addrp;
1398 	int	off;
1399 	int	len;
1400 	int	i;
1401 
1402 	stp = (struct idnstr *)wq->q_ptr;
1403 
1404 	if (MBLKL(mp) < DL_DISABMULTI_REQ_SIZE) {
1405 		DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_BADPRIM, 0);
1406 		return;
1407 	}
1408 
1409 	if (stp->ss_state == DL_UNATTACHED) {
1410 		DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_OUTSTATE, 0);
1411 		return;
1412 	}
1413 
1414 	dlp = (union DL_primitives *)mp->b_rptr;
1415 	len = dlp->disabmulti_req.dl_addr_length;
1416 	off = dlp->disabmulti_req.dl_addr_offset;
1417 	addrp = (struct ether_addr *)(mp->b_rptr + off);
1418 
1419 	if ((len != ETHERADDRL) || !MBLKIN(mp, off, len)) {
1420 		DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_BADADDR, 0);
1421 		return;
1422 	}
1423 
1424 	/*
1425 	 * Find the address in the multicast table for this Stream
1426 	 * and delete it by shifting all subsequent multicast
1427 	 * table entries over one.
1428 	 */
1429 	for (i = 0; i < stp->ss_mccount; i++)
1430 		if (ether_cmp(addrp, &stp->ss_mctab[i]) == 0) {
1431 			bcopy(&stp->ss_mctab[i+1],
1432 				&stp->ss_mctab[i],
1433 				((stp->ss_mccount - i) *
1434 				sizeof (struct ether_addr)));
1435 			stp->ss_mccount--;
1436 			(void) idndl_init(stp->ss_sip);
1437 			DLOKACK(wq, mp, DL_DISABMULTI_REQ);
1438 			return;
1439 		}
1440 	DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_NOTENAB, 0);
1441 }
1442 
1443 static void
1444 idndl_pareq(queue_t *wq, mblk_t *mp)
1445 {
1446 	struct idnstr		*stp;
1447 	union DL_primitives	*dlp;
1448 	int			type;
1449 	struct idn		*sip;
1450 	struct ether_addr	addr;
1451 
1452 	stp = (struct idnstr *)wq->q_ptr;
1453 
1454 	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
1455 		DLERRORACK(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
1456 		return;
1457 	}
1458 
1459 	dlp  = (union DL_primitives *)mp->b_rptr;
1460 	type = dlp->physaddr_req.dl_addr_type;
1461 	sip  = stp->ss_sip;
1462 
1463 	if (sip == NULL) {
1464 		DLERRORACK(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
1465 		return;
1466 	}
1467 
1468 	switch (type) {
1469 	case DL_FACT_PHYS_ADDR:
1470 		idndl_localetheraddr(sip, &addr);
1471 		break;
1472 
1473 	case DL_CURR_PHYS_ADDR:
1474 		ether_copy(&sip->si_ouraddr, &addr);
1475 		break;
1476 
1477 	default:
1478 		DLERRORACK(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
1479 		return;
1480 	}
1481 
1482 	DLPHYSADDRACK(wq, mp, &addr, ETHERADDRL);
1483 }
1484 
1485 #ifdef notdef
1486 static void
1487 idndl_spareq(queue_t *wq, mblk_t *mp)
1488 {
1489 	struct idnstr		*stp;
1490 	union DL_primitives	*dlp;
1491 	int	off;
1492 	int	len;
1493 	struct ether_addr	*addrp;
1494 	struct idn		*sip;
1495 
1496 	stp = (struct idnstr *)wq->q_ptr;
1497 
1498 	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
1499 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
1500 		return;
1501 	}
1502 
1503 	dlp = (union DL_primitives *)mp->b_rptr;
1504 	len = dlp->set_physaddr_req.dl_addr_length;
1505 	off = dlp->set_physaddr_req.dl_addr_offset;
1506 
1507 	if (!MBLKIN(mp, off, len)) {
1508 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
1509 		return;
1510 	}
1511 
1512 	addrp = (struct ether_addr *)(mp->b_rptr + off);
1513 
1514 	/*
1515 	 * Error if length of address isn't right or the address
1516 	 * specified is a multicast or broadcast address.
1517 	 */
1518 	if ((len != ETHERADDRL) ||
1519 	    IDNDL_ADDR_IS_MULTICAST(addrp) ||
1520 	    (ether_cmp(addrp, &etherbroadcastaddr) == 0)) {
1521 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
1522 		return;
1523 	}
1524 
1525 	/*
1526 	 * Error if this stream is not attached to a device.
1527 	 */
1528 	if ((sip = stp->ss_sip) == NULL) {
1529 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
1530 		return;
1531 	}
1532 
1533 	/*
1534 	 * Set new interface local address and re-init device.
1535 	 * This is destructive to any other streams attached
1536 	 * to this device.
1537 	 */
1538 	ether_copy(addrp, &sip->si_ouraddr);
1539 	(void) idndl_init(stp->ss_sip);
1540 
1541 	DLOKACK(wq, mp, DL_SET_PHYS_ADDR_REQ);
1542 }
1543 #endif /* notdef */
1544 
1545 static void
1546 idndl_udreq(queue_t *wq, mblk_t *mp)
1547 {
1548 	struct idnstr			*stp;
1549 	register struct idn		*sip;
1550 	register dl_unitdata_req_t	*dludp;
1551 	mblk_t				*nmp;
1552 	struct idndladdr	*dlap;
1553 	struct ether_header	*headerp;
1554 	t_uscalar_t		off, len;
1555 	t_uscalar_t		sap;
1556 
1557 	stp = (struct idnstr *)wq->q_ptr;
1558 	sip = stp->ss_sip;
1559 
1560 	if (stp->ss_state != DL_IDLE) {
1561 		DLERRORACK(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
1562 		return;
1563 	}
1564 
1565 	dludp = (dl_unitdata_req_t *)mp->b_rptr;
1566 
1567 	off = dludp->dl_dest_addr_offset;
1568 	len = dludp->dl_dest_addr_length;
1569 
1570 	/*
1571 	 * Validate destination address format.
1572 	 */
1573 	if (!MBLKIN(mp, off, len) || (len != IDNADDRL)) {
1574 		dluderrorind(wq, mp, mp->b_rptr + off, len, DL_BADADDR, 0);
1575 		return;
1576 	}
1577 
1578 	/*
1579 	 * Error if no M_DATA follows.
1580 	 */
1581 	nmp = mp->b_cont;
1582 	if (nmp == NULL) {
1583 		dluderrorind(wq, mp, mp->b_rptr + off, len, DL_BADDATA, 0);
1584 		return;
1585 	}
1586 
1587 	dlap = (struct idndladdr *)(mp->b_rptr + off);
1588 
1589 	/*
1590 	 * Create the ethernet header by either prepending it onto the
1591 	 * next mblk if possible, or reusing the M_PROTO block if not.
1592 	 */
1593 	if ((DB_REF(nmp) == 1) &&
1594 	    (MBLKHEAD(nmp) >= sizeof (struct ether_header)) &&
1595 	    (((ulong_t)nmp->b_rptr & 0x1) == 0)) {
1596 		nmp->b_rptr -= sizeof (struct ether_header);
1597 		headerp = (struct ether_header *)nmp->b_rptr;
1598 		ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
1599 		ether_copy(&sip->si_ouraddr, &headerp->ether_shost);
1600 		sap = dlap->dl_sap;
1601 		freeb(mp);
1602 		mp = nmp;
1603 	} else {
1604 		DB_TYPE(mp) = M_DATA;
1605 		headerp = (struct ether_header *)mp->b_rptr;
1606 		mp->b_wptr = mp->b_rptr + sizeof (struct ether_header);
1607 		ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
1608 		ether_copy(&sip->si_ouraddr, &headerp->ether_shost);
1609 		sap = dlap->dl_sap;
1610 	}
1611 
1612 	/*
1613 	 * For transmitting, the driver checks whether the sap field of
1614 	 * the DL_BIND_REQ was 0 or the type field is in the range
1615 	 * [0-1500].  If either is true, then the driver computes the
1616 	 * length of the message, not including the initial M_PROTO mblk
1617 	 * (message block), of all subsequent DL_UNITDATA_REQ messages
1618 	 * and transmits 802.3 frames that have this value in the MAC
1619 	 * frame header length field.
1620 	 */
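	/*
	 * For example, with ss_sap == 0 and a 90-byte payload following
	 * the header, ether_type is set to 90 (an 802.3 length field);
	 * with dl_sap == ETHERTYPE_IP (0x800) on a non-zero-SAP stream,
	 * ether_type remains 0x800.
	 */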
1621 	if ((sap <= ETHERMTU) || (stp->ss_sap == 0))
1622 		headerp->ether_type = (msgsize(mp) -
1623 					sizeof (struct ether_header));
1624 	else
1625 		headerp->ether_type = sap;
1626 
1627 	/*
1628 	 * The data transfer code requires only READ access (idn_wput_data).
1629 	 */
1630 	rw_downgrade(&stp->ss_rwlock);
1631 	(void) idndl_start(wq, mp, sip);
1632 }
1633 
1634 int
1635 idndl_start(queue_t *wq, register mblk_t *mp, register struct idn *sip)
1636 {
1637 	int		rv = 0;
1638 	int		flags;
1639 	int		broadcast = 0;
1640 	int		goagain = 0;
1641 	int		goqueue = 0;
1642 	int		msgcount;
1643 	char		channel;
1644 	mblk_t		*nmp = NULL;
1645 	int		domid;
1646 	domainset_t	domset;
1647 	idn_netaddr_t	netaddr;
1648 	struct idnstr	*stp;
1649 	struct ether_header	*ehp;
1650 	procname_t	proc = "idndl_start";
1651 
1652 	ASSERT(DB_TYPE(mp) == M_DATA);
1653 
1654 	stp = (struct idnstr *)wq->q_ptr;
1655 	ASSERT(sip == stp->ss_sip);
1656 	flags = sip->si_flags;
1657 	channel = (char)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1658 
1659 	ASSERT(RW_READ_HELD(&stp->ss_rwlock));
1660 
1661 	if ((flags & (IDNRUNNING|IDNPROMISC)) != IDNRUNNING) {
1662 		if (!(flags & IDNRUNNING))
1663 			goto requeue;
1664 	}
1665 
1666 	/*
1667 	 * Translate an IDN ethernet address into a domainid
1668 	 * and idnaddr.
1669 	 */
1670 	ehp = (struct ether_header *)mp->b_rptr;
1671 	domid = IDNDL_ETHER2DOMAIN(&ehp->ether_dhost);
1672 
1673 	/*
1674 	 * update MIB II statistics
1675 	 */
1676 	BUMP_OutNUcast(sip, ehp);
1677 
1678 	PR_DLPI("%s: ether %x:%x:%x:%x:%x:%x (domid = %d)\n",
1679 		proc, ehp->ether_dhost.ether_addr_octet[0],
1680 		ehp->ether_dhost.ether_addr_octet[1],
1681 		ehp->ether_dhost.ether_addr_octet[2],
1682 		ehp->ether_dhost.ether_addr_octet[3],
1683 		ehp->ether_dhost.ether_addr_octet[4],
1684 		ehp->ether_dhost.ether_addr_octet[5],
1685 		domid);
1686 
1687 	netaddr.net.chan = channel;
1688 	PR_DLPI("%s: source channel = %d\n", proc, (int)channel);
1689 
1690 	if ((ether_cmp(&ehp->ether_dhost, &etherbroadcastaddr) == 0) ||
1691 			IDNDL_ADDR_IS_MULTICAST(&ehp->ether_dhost)) {
1692 		/*
1693 		 * Caller wants to broadcast!
1694 		 * XXX - Send to everybody but ourself???
1695 		 */
1696 		PR_DLPI("%s: broadcast/multicast requested!!!\n", proc);
1697 		domset = ~DOMAINSET(idn.localid);
1698 		broadcast = 1;
1699 		netaddr.net.netid = IDN_BROADCAST_ALLNETID;
1700 		if ((flags & IDNPROMISC) &&
1701 		    ((nmp = copymsg(mp)) == NULL)) {
1702 			IDN_KSTAT_INC(sip, si_allocbfail);
1703 		}
1704 
1705 	} else if (domid != IDN_NIL_DOMID) {
1706 		domset = DOMAINSET(domid);
1707 		netaddr.net.netid = idn_domain[domid].dnetid;
1708 		if ((flags & IDNPROMISC) &&
1709 		    ((nmp = copymsg(mp)) == NULL)) {
1710 			IDN_KSTAT_INC(sip, si_allocbfail);
1711 		}
1712 	} else {
1713 #ifdef DEBUG
1714 		int	netid;
1715 
1716 		netid = (int)
1717 			ehp->ether_dhost.ether_addr_octet[IDNETHER_NETID];
1718 		PR_DLPI("%s: no domain found for netid 0x%x\n",
1719 			proc, netid);
1720 #endif /* DEBUG */
1721 		goto bad;
1722 	}
1723 
1724 	PR_DLPI("%s: target domainset = 0x%x\n", proc, domset);
1725 
1726 	if ((domset == 0) && (domid == IDN_NIL_DOMID)) {
1727 		PR_DLPI("%s: not connected to any domains!!  Bailing\n",
1728 			proc);
1729 		goto bad;
1730 	}
1731 	/*
1732 	 * XXX - Need to find a better way to handle broadcasting.
1733 	 *	 Should be able to take advantage of the fact that
1734 	 *	 we can broadcast XDC's (xdc_some).  Need to use
1735 	 *	 atomic counter (semaphore) instead of binary
1736 	 *	 "owner" flag, or perhaps domain specific owner bytes.
1737 	 *
1738 	 * Transfer the data.
1739 	 */
1740 	msgcount = 0;
1741 	if (!broadcast)
1742 		goto noloop;
1743 
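	/*
	 * Unicast sends jump directly into the loop body via the noloop
	 * label and break out after a single pass; broadcast sends walk
	 * every domain in domset.
	 */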
1744 	for (domid = 0; domid < MAX_DOMAINS; domid++) {
1745 		if (!DOMAIN_IN_SET(domset, domid))
1746 			continue;
1747 
1748 noloop:
1749 
1750 		if (idn_domain[domid].dcpu == IDN_NIL_DCPU) {
1751 			if (broadcast)
1752 				continue;
1753 			else
1754 				break;
1755 		}
1756 
1757 		rv = idn_send_data(domid, netaddr, wq, mp);
1758 
1759 		switch (rv) {
1760 		case IDNXMIT_LOOP:	/* handled in loopback */
1761 			msgcount++;
1762 			break;
1763 
1764 		case IDNXMIT_OKAY:	/* handled, okay to free */
1765 			msgcount++;
1766 			break;
1767 
1768 		case IDNXMIT_RETRY:
1769 			if (!broadcast)
1770 				goto tryagain;
1771 			goagain++;
1772 			break;
1773 
1774 		case IDNXMIT_REQUEUE:
1775 			if (!broadcast)
1776 				goto requeue;
1777 			goqueue++;
1778 			break;
1779 
1780 		default:
1781 			if (!broadcast)
1782 				goto bad;
1783 			break;
1784 		}
1785 		if (!broadcast)
1786 			break;
1787 	}
1788 
1789 	if (msgcount == 0)
1790 		if (goqueue)
1791 			goto requeue;
1792 		else if (goagain)
1793 			goto tryagain;
1794 		else
1795 			goto bad;
1796 
1797 	if ((flags & IDNPROMISC) && nmp)
1798 		idndl_sendup(sip, nmp, idndl_paccept);
1799 
1800 	freemsg(mp);
1801 
1802 	PR_DLPI("%s: successful transmit to domainset 0x%x.\n",
1803 		proc, domset);
1804 
1805 	return (0);
1806 
1807 bad:
1808 	PR_DLPI("%s: bad transmission to domainset 0x%x, dropping msg.\n",
1809 		proc, domset);
1810 	if (nmp)
1811 		freemsg(nmp);
1812 	freemsg(mp);
1813 	qenable(wq);
1814 	return (1);
1815 
1816 requeue:
1817 	PR_DLPI("%s: requeue for domainset 0x%x, no qenable\n",
1818 		proc, domset);
1819 	if (nmp)
1820 		freemsg(nmp);
1821 	if (putbq(wq, mp) == 0)
1822 		freemsg(mp);
1823 	return (1);
1824 
1825 tryagain:
1826 	PR_DLPI("%s: try again to domainset 0x%x, putbq.\n",
1827 		proc, domset);
1828 	if (nmp)
1829 		freemsg(nmp);
1830 	if (putbq(wq, mp) == 0)
1831 		freemsg(mp);
1832 	qenable(wq);
1833 	return (1);
1834 }
1835 
1836 /*
1837  * Called by:	idnh_recv_data, idn_recv_mboxdata.
1838  */
1839 void
1840 idndl_read(struct idn *sip, mblk_t *mp)
1841 {
1842 	struct ether_header	*ehp;
1843 	queue_t			*ip4q;
1844 	queue_t			*ip6q;
1845 	int		pktlen;
1846 	procname_t	proc = "idndl_read";
1847 
1848 	PR_DLPI("%s: incoming msgsize = %lu, msgdsize = %lu\n",
1849 		proc, msgsize(mp), msgdsize(mp));
1850 
1851 	ehp = (struct ether_header *)mp->b_rptr;
1852 	if (sip == NULL)
1853 		sip = IDNDL_ETHER2SIP(&ehp->ether_dhost);
1854 	if (sip == NULL) {
1855 		/*
1856 		 * If the sip is NULL, then I don't have a connection
1857 		 * for this network.  No point in sending the message
1858 		 * up.
1859 		 */
1860 		PR_DLPI("%s: no plumbing to send message through.\n",
1861 			proc);
1862 		freemsg(mp);
1863 		return;
1864 	}
1865 	IDN_KSTAT_INC(sip, si_ipackets);
1866 	IDN_KSTAT_INC(sip, si_ipackets64);
1867 	/*
1868 	 * update MIB II statistics
1869 	 */
1870 	pktlen = mp->b_wptr - mp->b_rptr;
1871 	BUMP_InNUcast(sip, ehp);
1872 	IDN_KSTAT_ADD(sip, si_rcvbytes, pktlen);
1873 	IDN_KSTAT_ADD(sip, si_rbytes64, (uint64_t)pktlen);
1874 
1875 	ip4q = sip->si_ip4q;
1876 	ip6q = sip->si_ip6q;
1877 
1878 	if (IS_ETHERTYPE_IPV4(ehp->ether_type) &&
1879 			!IDNDL_ADDR_IS_MULTICAST(&ehp->ether_dhost) &&
1880 			ip4q &&
1881 			canputnext(ip4q)) {
1882 		mp->b_rptr += sizeof (struct ether_header);
1883 		(void) putnext(ip4q, mp);
1884 		/*LINTED*/
1885 	} else if (IS_ETHERTYPE_IPV6(ehp->ether_type) &&
1886 			!IDNDL_ADDR_IS_MULTICAST(&ehp->ether_dhost) &&
1887 			ip6q &&
1888 			canputnext(ip6q)) {
1889 		mp->b_rptr += sizeof (struct ether_header);
1890 		(void) putnext(ip6q, mp);
1891 	} else {
1892 		/*
1893 		 * Strip the PADs for 802.3
1894 		 */
1895 		pktlen = ehp->ether_type + sizeof (struct ether_header);
1896 		PR_DLPI("%s: stripping PADs for 802.3 (pktlen=%d)\n",
1897 			proc, pktlen);
1898 		if (pktlen < ETHERMIN)
1899 			mp->b_wptr = mp->b_rptr + pktlen;
1900 		idndl_sendup(sip, mp, idndl_accept);
1901 	}
1902 }
1903 
1904 int
1905 idndl_init(struct idn *sip)
1906 {
1907 	struct idnstr	*stp;
1908 
1909 	if (sip->si_flags & IDNSUSPENDED)
1910 		(void) ddi_dev_is_needed(sip->si_dip, 0, 1);
1911 
1912 	sip->si_flags = 0;
1913 	sip->si_wantw = 0;
1914 
1915 	IDN_KSTAT_INC(sip, si_inits);
1916 
1917 	rw_enter(&idn.struprwlock, RW_WRITER);
1918 
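	/*
	 * If any stream bound over this sip has requested all
	 * physical packets (IDNSALLPHYS), run the interface in
	 * promiscuous mode.
	 */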
1919 	for (stp = idn.strup; stp; stp = stp->ss_nextp) {
1920 		if ((stp->ss_sip == sip) && (stp->ss_flags & IDNSALLPHYS)) {
1921 			sip->si_flags |= IDNPROMISC;
1922 			break;
1923 		}
1924 	}
1925 
1926 	sip->si_flags |= IDNRUNNING;
1927 
1928 	mutex_enter(&idn.sipwenlock);
1929 	idndl_wenable(sip);
1930 	mutex_exit(&idn.sipwenlock);
1931 
1932 	rw_exit(&idn.struprwlock);
1933 
1934 	return (!(sip->si_flags & IDNRUNNING));
1935 }
1936 
1937 void
1938 idndl_uninit(struct idn *sip)
1939 {
1940 	int		channel;
1941 	procname_t	proc = "idndl_uninit";
1942 
1943 	sip->si_flags &= ~IDNRUNNING;
1944 
1945 	channel = (int)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1946 	PR_DLPI("%s: IP SAP, uninit channel %d\n", proc, channel);
1947 	/*
1948 	 * An uninit is a hard close of an interface.
1949 	 */
1950 	idn_close_channel(channel, IDNCHAN_HARD_CLOSE);
1951 }
1952 
1953 /*
1954  * Send packet upstream.
1955  * Assume mp->b_rptr points to ether_header.
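 * Each matching stream receives its own dup (or copy) of the message;
 * the final matching stream consumes the original mblk.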
1956  */
1957 void
1958 idndl_sendup(struct idn *sip, mblk_t *mp, struct idnstr *(*acceptfunc)())
1959 {
1960 	int			type;
1961 	struct ether_addr	*dhostp, *shostp;
1962 	struct idnstr		*stp, *nstp;
1963 	mblk_t 		*nmp;
1964 	ulong_t		isgroupaddr;
1965 
1966 	TRACE_0(TR_FAC_IDN, TR_IDN_SENDUP_START, "idnsendup start");
1967 
1968 	dhostp = &((struct ether_header *)mp->b_rptr)->ether_dhost;
1969 	shostp = &((struct ether_header *)mp->b_rptr)->ether_shost;
1970 	type = ((struct ether_header *)mp->b_rptr)->ether_type;
1971 
1972 	isgroupaddr = IDNDL_ADDR_IS_MULTICAST(dhostp);
1973 
1974 	/*
1975 	 * While holding a reader lock on the linked list of streams structures,
1976 	 * attempt to match the address criteria for each stream
1977 	 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
1978 	 */
1979 
1980 	rw_enter(&idn.struprwlock, RW_READER);
1981 
1982 	if ((stp = (*acceptfunc)(idn.strup, sip, type, dhostp)) == NULL) {
1983 		rw_exit(&idn.struprwlock);
1984 		freemsg(mp);
1985 		TRACE_0(TR_FAC_IDN, TR_IDN_SENDUP_END, "idnsendup end");
1986 		return;
1987 	}
1988 
1989 	/*
1990 	 * Loop on matching open streams until (*acceptfunc)() returns NULL.
1991 	 */
1992 	for (; nstp = (*acceptfunc)(stp->ss_nextp, sip, type, dhostp);
1993 		stp = nstp) {
1994 
1995 		if (canputnext(stp->ss_rq) == 0) {
1996 			IDN_KSTAT_INC(sip, si_nocanput);
1997 			continue;
1998 		}
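		/*
		 * dupmsg() shares the underlying data blocks; if it
		 * fails, fall back to copymsg() for a full copy.
		 */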
1999 		if ((nmp = dupmsg(mp)) == NULL)
2000 			nmp = copymsg(mp);
2001 		if (nmp) {
2002 			if ((stp->ss_flags & IDNSFAST) && !isgroupaddr) {
2003 				nmp->b_rptr += sizeof (struct ether_header);
2004 				(void) putnext(stp->ss_rq, nmp);
2005 			} else if (stp->ss_flags & IDNSRAW) {
2006 				(void) putnext(stp->ss_rq, nmp);
2007 			} else if ((nmp = idndl_addudind(sip, nmp, shostp,
2008 						dhostp, type, isgroupaddr))) {
2009 				(void) putnext(stp->ss_rq, nmp);
2010 			}
2011 		} else {
2012 			IDN_KSTAT_INC(sip, si_allocbfail);
2013 		}
2014 	}
2015 
2016 
2017 	/*
2018 	 * Do the last one.
2019 	 */
2020 	if (canputnext(stp->ss_rq)) {
2021 		if ((stp->ss_flags & IDNSFAST) && !isgroupaddr) {
2022 			mp->b_rptr += sizeof (struct ether_header);
2023 			(void) putnext(stp->ss_rq, mp);
2024 		} else if (stp->ss_flags & IDNSRAW) {
2025 			(void) putnext(stp->ss_rq, mp);
2026 		} else if ((mp = idndl_addudind(sip, mp, shostp, dhostp,
2027 					    type, isgroupaddr))) {
2028 			(void) putnext(stp->ss_rq, mp);
2029 		}
2030 	} else {
2031 		freemsg(mp);
2032 		IDN_KSTAT_INC(sip, si_nocanput);
2033 		IDN_KSTAT_INC(sip, si_norcvbuf);	/* MIB II */
2034 	}
2035 
2036 	rw_exit(&idn.struprwlock);
2037 	TRACE_0(TR_FAC_IDN, TR_IDN_SENDUP_END, "idnsendup end");
2038 }
2039 
2040 /*
2041  * Test upstream destination sap and address match.
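 * A stream matches if it is bound over this sip with a SAP that
 * matches the packet type, and the destination is our address, the
 * broadcast address, one of the stream's enabled multicast addresses,
 * or the stream is in physical promiscuous (IDNSALLPHYS) mode.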
2042  */
2043 struct idnstr *
2044 idndl_accept(register struct idnstr *stp, register struct idn *sip,
2045 	    int type, struct ether_addr *addrp)
2046 {
2047 	t_uscalar_t	sap;
2048 	uint_t		flags;
2049 
2050 	for (; stp; stp = stp->ss_nextp) {
2051 		sap   = stp->ss_sap;
2052 		flags = stp->ss_flags;
2053 
2054 		if ((stp->ss_sip == sip) && IDNSAPMATCH(sap, type, flags))
2055 			if ((ether_cmp(addrp, &sip->si_ouraddr) == 0) ||
2056 			    (ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
2057 			    (flags & IDNSALLPHYS) ||
2058 			    idndl_mcmatch(stp, addrp))
2059 				return (stp);
2060 	}
2061 
2062 	return (NULL);
2063 }
2064 
2065 /*
2066  * Test upstream destination sap and address match for IDNSALLPHYS only.
2067  */
2068 /* ARGSUSED3 */
2069 struct idnstr *
2070 idndl_paccept(register struct idnstr *stp, register struct idn *sip,
2071 	    int type, struct ether_addr *addrp)
2072 {
2073 	t_uscalar_t	sap;
2074 	uint_t		flags;
2075 
2076 	for (; stp; stp = stp->ss_nextp) {
2077 		sap   = stp->ss_sap;
2078 		flags = stp->ss_flags;
2079 
2080 		if ((stp->ss_sip == sip) &&
2081 		    IDNSAPMATCH(sap, type, flags) &&
2082 		    (flags & IDNSALLPHYS))
2083 			return (stp);
2084 	}
2085 
2086 	return (NULL);
2087 }
2088 
2089 /*
2090  * Set or clear the device ipq pointer.
2091  * Assumes IPv4 and IPv6 are IDNSFAST.
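 * A fastpath queue is cached only when exactly one stream is bound to
 * the corresponding IP SAP and no stream on this sip is in IDNSALLSAP
 * or IDNSALLPHYS mode; otherwise the pointer is left NULL and
 * idndl_read() falls back to idndl_sendup().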
2092  */
2093 static void
2094 idndl_setipq(struct idn *sip)
2095 {
2096 	struct idnstr	*stp;
2097 	int		ok4 = 1;
2098 	int		ok6 = 1;
2099 	queue_t		*ip4q = NULL;
2100 	queue_t		*ip6q = NULL;
2101 
2102 	rw_enter(&idn.struprwlock, RW_READER);
2103 
2104 	for (stp = idn.strup; stp; stp = stp->ss_nextp) {
2105 		if (stp->ss_sip == sip) {
2106 			if (stp->ss_flags & (IDNSALLPHYS|IDNSALLSAP)) {
2107 				ok4 = 0;
2108 				ok6 = 0;
2109 				break;
2110 			}
2111 			if (IS_ETHERTYPE_IPV4(stp->ss_sap)) {
2112 				if (ip4q == NULL)
2113 					ip4q = stp->ss_rq;
2114 				else
2115 					ok4 = 0;
2116 				/*LINTED*/
2117 			} else if (IS_ETHERTYPE_IPV6(stp->ss_sap)) {
2118 				if (ip6q == NULL)
2119 					ip6q = stp->ss_rq;
2120 				else
2121 					ok6 = 0;
2122 			}
2123 		}
2124 	}
2125 
2126 	rw_exit(&idn.struprwlock);
2127 
2128 	if (ok4)
2129 		sip->si_ip4q = ip4q;
2130 	else
2131 		sip->si_ip4q = NULL;
2132 	if (ok6)
2133 		sip->si_ip6q = ip6q;
2134 	else
2135 		sip->si_ip6q = NULL;
2136 }
2137 
2138 /*
2139  * Prefix msg with a DL_UNITDATA_IND mblk and return the new msg.
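 *
 * The chain handed back to the caller is laid out as follows (a
 * sketch of what is constructed below):
 *
 *	M_PROTO:  dl_unitdata_ind_t | dest idndladdr | src idndladdr
 *	    |
 *	    +-- b_cont --> M_DATA:  payload (ether_header stripped)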
2140  */
2141 static mblk_t *
2142 idndl_addudind(struct idn *sip, mblk_t *mp,
2143 	    struct ether_addr *shostp, struct ether_addr *dhostp,
2144 	    int type, ulong_t isgroupaddr)
2145 {
2146 	dl_unitdata_ind_t	*dludindp;
2147 	struct idndladdr	*dlap;
2148 	mblk_t	*nmp;
2149 	int	size;
2150 
2151 	TRACE_0(TR_FAC_IDN, TR_IDN_ADDUDIND_START, "idndl_addudind start");
2152 
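	/*
	 * Strip the MAC header; the addressing information is carried
	 * in the DL_UNITDATA_IND instead.
	 */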
2153 	mp->b_rptr += sizeof (struct ether_header);
2154 
2155 	/*
2156 	 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
2157 	 */
2158 	size = sizeof (dl_unitdata_ind_t) + IDNADDRL + IDNADDRL;
2159 	nmp = allocb(IDNROUNDUP(IDNHEADROOM + size, sizeof (double)), BPRI_LO);
2160 	if (nmp == NULL) {
2161 		IDN_KSTAT_INC(sip, si_allocbfail);
2162 		IDN_KSTAT_INC(sip, si_ierrors);
2163 		if (idn_debug)
2164 			serror(sip->si_dip, 451, "allocb failed");
2165 		freemsg(mp);
2166 		TRACE_0(TR_FAC_IDN, TR_IDN_ADDUDIND_END, "idndl_addudind end");
2167 		return (NULL);
2168 	}
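	/*
	 * Build the primitive at the tail of the buffer so the
	 * headroom requested from allocb() stays in front of b_rptr.
	 */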
2169 	DB_TYPE(nmp) = M_PROTO;
2170 	nmp->b_wptr = nmp->b_datap->db_lim;
2171 	nmp->b_rptr = nmp->b_wptr - size;
2172 
2173 	/*
2174 	 * Construct a DL_UNITDATA_IND primitive.
2175 	 */
2176 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2177 	dludindp->dl_primitive = DL_UNITDATA_IND;
2178 	dludindp->dl_dest_addr_length = IDNADDRL;
2179 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2180 	dludindp->dl_src_addr_length = IDNADDRL;
2181 	dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + IDNADDRL;
2182 	dludindp->dl_group_address = isgroupaddr;
2183 
2184 	dlap = (struct idndladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
2185 	ether_copy(dhostp, &dlap->dl_phys);
2186 	dlap->dl_sap = (ushort_t)type;
2187 
2188 	dlap = (struct idndladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
2189 					+ IDNADDRL);
2190 	ether_copy(shostp, &dlap->dl_phys);
2191 	dlap->dl_sap = (ushort_t)type;
2192 
2193 	/*
2194 	 * Link the M_PROTO and M_DATA together.
2195 	 */
2196 	nmp->b_cont = mp;
2197 	TRACE_0(TR_FAC_IDN, TR_IDN_ADDUDIND_END, "idndl_addudind end");
2198 	return (nmp);
2199 }
2200 
2201 /*
2202  * Return TRUE if the given multicast address is one
2203  * of those that this particular Stream is interested in.
2204  */
2205 static int
2206 idndl_mcmatch(register struct idnstr *stp, register struct ether_addr *addrp)
2207 {
2208 	register struct ether_addr	*mctab;
2209 	register int	mccount;
2210 	register int	i;
2211 
2212 	/*
2213 	 * Return FALSE if not a multicast address.
2214 	 */
2215 	if (!IDNDL_ADDR_IS_MULTICAST(addrp))
2216 		return (0);
2217 
2218 	/*
2219 	 * Check if all multicasts have been enabled for this Stream
2220 	 */
2221 	if (stp->ss_flags & IDNSALLMULTI)
2222 		return (1);
2223 
2224 	/*
2225 	 * Return FALSE if no multicast addresses enabled for this Stream.
2226 	 */
2227 	if (stp->ss_mccount == 0)
2228 		return (0);
2229 
2230 	/*
2231 	 * Otherwise, find it in the table.
2232 	 */
2233 
2234 	mccount = stp->ss_mccount;
2235 	mctab = stp->ss_mctab;
2236 
2237 	for (i = 0; i < mccount; i++)
2238 		if (!ether_cmp(addrp, &mctab[i]))
2239 			return (1);
2240 
2241 	return (0);
2242 }
2243 
2244 /*
2245  * Start xmit on any msgs previously enqueued on any write queues.
2246  * If the caller passes a NULL sip, then we need to check all
2247  * of our interfaces.
2248  */
2249 void
2250 idndl_wenable(struct idn *sip)
2251 {
2252 	struct idnstr	*stp;
2253 	queue_t		*wq;
2254 
2255 	/*
2256 	 * Order of wantw accesses is important.
2257 	 */
2258 	ASSERT((sip == NULL) ? RW_LOCK_HELD(&idn.struprwlock) : 1);
2259 	ASSERT(MUTEX_HELD(&idn.sipwenlock));
2260 
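	/*
	 * Clear si_wantw before scanning; if a writer sets it again
	 * while the queues are being enabled, loop and rescan so the
	 * wakeup is not lost.
	 */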
2261 	do {
2262 		if (sip)
2263 			sip->si_wantw = 0;
2264 		for (stp = idn.strup; stp; stp = stp->ss_nextp) {
2265 			if ((!sip || (stp->ss_sip == sip)) &&
2266 			    stp->ss_rq && ((wq = WR(stp->ss_rq))->q_first))
2267 				qenable(wq);
2268 		}
2269 	} while (sip && sip->si_wantw);
2270 }
2271 
2272 /*VARARGS*/
2273 static void
2274 serror(dev_info_t *dip, int idnerr, char *fmt, ...)
2275 {
2276 	static	long	last;
2277 	static	char	*lastfmt;
2278 	char		msg_buffer[255];
2279 	va_list ap;
2280 	time_t	now;
2281 
2282 	/*
2283 	 * Don't print the same error message more than once per two seconds.
2284 	 */
2285 	now = gethrestime_sec();
2286 	if ((last == (now & ~1)) && (lastfmt == fmt))
2287 		return;
2288 
2289 	last = now & ~1;
2290 	lastfmt = fmt;
2291 
2292 	va_start(ap, fmt);
2293 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
2294 	cmn_err(CE_CONT, "IDN: %d: %s%d: %s\n",
2295 		idnerr, ddi_get_name(dip),
2296 		ddi_get_instance(dip), msg_buffer);
2297 	va_end(ap);
2298 }
2299