xref: /titanic_50/usr/src/uts/sun4u/starfire/io/idn_dlpi.c (revision c9a6ea2e938727c95af7108c5e00eee4c890c7ae)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * IDN DLPI support (based on QE implementation).
28  */
29 #include <sys/types.h>
30 #include <sys/debug.h>
31 #include <sys/stropts.h>
32 #include <sys/stream.h>
33 #include <sys/systm.h>
34 #include <sys/cmn_err.h>
35 #include <sys/errno.h>
36 #ifdef xxx_trace
37 #include <sys/vtrace.h>
38 #endif /* xxx_trace */
39 #include <sys/kmem.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/strsun.h>
43 #include <sys/stat.h>
44 #include <sys/kstat.h>
45 #include <sys/dlpi.h>
46 #include <sys/time.h>
47 #include <sys/cpuvar.h>
48 
49 #include <sys/idn.h>
50 
51 #ifdef	IPV6
52 #define	IS_ETHERTYPE_IPV4(x)	((x) == ETHERTYPE_IP)
53 #define	IS_ETHERTYPE_IPV6(x)	((x) == ETHERTYPE_IPV6)
54 #define	IS_ETHERTYPE_IP(x)	(IS_ETHERTYPE_IPV4(x) || IS_ETHERTYPE_IPV6(x))
55 #else
56 #define	IS_ETHERTYPE_IPV4(x)	((x) == ETHERTYPE_IP)
57 #define	IS_ETHERTYPE_IPV6(x)	(0)
58 #define	IS_ETHERTYPE_IP		IS_ETHERTYPE_IPV4
59 #endif /* IPV6 */
60 
61 #ifdef IDN_TRACE
62 /*
63  * This stuff should go into <sys/vtrace.h>
64  */
65 #define	TR_FAC_IDN		100
66 /*
67  * TR_FAC_IDN tags
68  */
69 #define	TR_IDN_OPEN		0
70 #define	TR_IDN_CLOSE		1
71 #define	TR_IDN_WPUT_START	2
72 #define	TR_IDN_WPUT_END		3
73 #define	TR_IDN_WSRV_START	4
74 #define	TR_IDN_WSRV_END		5
75 #define	TR_IDN_START_START	6
76 #define	TR_IDN_START_END	7
77 #define	TR_IDN_INTR_START	8
78 #define	TR_IDN_INTR_END		9
79 #define	TR_IDN_READ_START	10
80 #define	TR_IDN_READ_END		11
81 #define	TR_IDN_SENDUP_START	12
82 #define	TR_IDN_SENDUP_END	13
83 #define	TR_IDN_ADDUDIND_START	14
84 #define	TR_IDN_ADDUDIND_END	15
85 #define	TR_IDN_GETBUF_START	16
86 #define	TR_IDN_GETBUF_END	17
87 #define	TR_IDN_FREEBUF_START	18
88 #define	TR_IDN_FREEBUF_END	19
89 #define	TR_IDN_PROTO_START	20
90 #define	TR_IDN_PROTO_END	21
91 #define	TR_IDN_INIT_START	22
92 #define	TR_IDN_INIT_END		23
93 #define	TR_IDN_PROTO_IN		24
94 #define	TR_IDN_PROTO_OUT	25
95 
96 #define	IDNTRACE(fac, tag)	(printf("idn.TRACE: "))
97 
98 #define	TRACE_0(fac, tag, name) \
99 	IDNTRACE((fac), (tag)); \
100 	printf(name); printf("\n");
101 
102 #define	TRACE_1(fac, tag, name, d1) \
103 	IDNTRACE((fac), (tag)); \
104 	printf(name, (d1)); printf("\n");
105 
106 #define	TRACE_2(fac, tag, name, d1, d2) \
107 	IDNTRACE((fac), (tag)); \
108 	printf(name, (d1), (d2)); printf("\n");
109 
110 #define	TRACE_3(fac, tag, name, d1, d2, d3) \
111 	IDNTRACE((fac), (tag)); \
112 	printf(name, (d1), (d2), (d3)); printf("\n");
113 
114 #define	TRACE_4(fac, tag, name, d1, d2, d3, d4) \
115 	IDNTRACE((fac), (tag)); \
116 	printf(name, (d1), (d2), (d3), (d4)); printf("\n");
117 
118 #define	TRACE_5(fac, tag, name, d1, d2, d3, d4, d5) \
119 	IDNTRACE((fac), (tag)); \
120 	printf(name, (d1), (d2), (d3), (d4), (d5)); printf("\n");
121 
122 #else /* IDN_TRACE */
123 
124 #define	TRACE_0(fac, tag, name) 			{}
125 #define	TRACE_1(fac, tag, name, d1) 			{}
126 #define	TRACE_2(fac, tag, name, d1, d2) 		{}
127 #define	TRACE_3(fac, tag, name, d1, d2, d3) 		{}
128 #define	TRACE_4(fac, tag, name, d1, d2, d3, d4) 	{}
129 #define	TRACE_5(fac, tag, name, d1, d2, d3, d4, d5) 	{}
130 
131 #endif /* IDN_TRACE */
132 
133 #ifdef DEBUG
134 #define	DLERRORACK(qq, mm, cc, ee, xx) \
135 { \
136 	PR_DLPI("dlpi: ERRORACK: 0x%x(%s), err = 0x%x(%s)\n", \
137 		(uint_t)(cc), dlprim2str(cc), \
138 		(uint_t)(ee), dlerr2str((int)(ee))); \
139 	dlerrorack((qq), (mm), (cc), (ee), (xx)); \
140 }
141 #define	DLOKACK(qq, mm, cc) \
142 { \
143 	PR_DLPI("dlpi: OKACK: 0x%x(%s)\n", (cc), dlprim2str(cc)); \
144 	dlokack((qq), (mm), (cc)); \
145 }
146 #define	DLBINDACK(qq, mm, ss, aa, ll, xx, yy) \
147 { \
148 	PR_DLPI("dlpi: BINDACK: eth=%x:%x:%x:%x:%x:%x, sap=0x%x, l=%d\n", \
149 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[0], \
150 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[1], \
151 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[2], \
152 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[3], \
153 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[4], \
154 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[5], \
155 		(uint_t)(ss), (int)(ll)); \
156 	dlbindack((qq), (mm), (ss), (aa), (ll), (xx), (yy)); \
157 }
158 #define	DLPHYSADDRACK(qq, mm, aa, ll) \
159 { \
160 	PR_DLPI("dlpi: PHYSACK: eth=%x:%x:%x:%x:%x:%x, l=%d\n", \
161 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[0], \
162 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[1], \
163 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[2], \
164 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[3], \
165 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[4], \
166 		((struct idndladdr *)(aa))->dl_phys.ether_addr_octet[5], \
167 		(ll)); \
168 	dlphysaddrack((qq), (mm), (aa), (ll)); \
169 }
170 
171 static char *dlerrstr[] = {
172 	"DL_BADSAP",
173 	"DL_BADADDR",
174 	"DL_ACCESS",
175 	"DL_OUTSTATE",
176 	"DL_SYSERR",
177 	"DL_BADCORR",
178 	"DL_BADDATA",
179 	"DL_UNSUPPORTED",
180 	"DL_BADPPA",
181 	"DL_BADPRIM",
182 	"DL_BADQOSPARAM",
183 	"DL_BADQOSTYPE",
184 	"DL_BADTOKEN",
185 	"DL_BOUND",
186 	"DL_INITFAILED",
187 	"DL_NOADDR",
188 	"DL_NOTINIT",
189 	"DL_UNDELIVERABLE",
190 	"DL_NOTSUPPORTED",
191 	"DL_TOOMANY",
192 	"DL_NOTENAB",
193 	"DL_BUSY",
194 	"DL_NOAUTO",
195 	"DL_NOXIDAUTO",
196 	"DL_NOTESTAUTO",
197 	"DL_XIDAUTO",
198 	"DL_TESTAUTO",
199 	"DL_PENDING"
200 };
201 static int dlerrnum = (sizeof (dlerrstr) / sizeof (char *));
202 
203 static char *
204 dlerr2str(int err)
205 {
206 	if ((err < 0) || (err >= dlerrnum))
207 		return ("unknown");
208 	else
209 		return (dlerrstr[err]);
210 }
211 
212 static char *
213 dlprim2str(int prim)
214 {
215 	char	*pstr;
216 
217 	switch (prim) {
218 	case DL_UNITDATA_REQ:	pstr = "UNITDATA_REQ";		break;
219 	case DL_ATTACH_REQ:	pstr = "ATTACH_REQ";		break;
220 	case DL_DETACH_REQ:	pstr = "DETACH_REQ";		break;
221 	case DL_BIND_REQ:	pstr = "BIND_REQ";		break;
222 	case DL_UNBIND_REQ:	pstr = "UNBIND_REQ";		break;
223 	case DL_INFO_REQ:	pstr = "INFO_REQ";		break;
224 	case DL_PROMISCON_REQ:	pstr = "PROMISCON_REQ";		break;
225 	case DL_PROMISCOFF_REQ:	pstr = "PROMISCOFF_REQ";	break;
226 	case DL_ENABMULTI_REQ:	pstr = "ENABMULTI_REQ";		break;
227 	case DL_DISABMULTI_REQ:	pstr = "DISABMULTI_REQ";	break;
228 	case DL_PHYS_ADDR_REQ:	pstr = "PHYS_ADDR_REQ";		break;
229 	case DL_SET_PHYS_ADDR_REQ:
230 				pstr = "SET_PHYS_ADDR_REQ";	break;
231 	default:		pstr = "unsupported";		break;
232 	}
233 	return (pstr);
234 }
235 #else /* DEBUG */
236 #define	DLERRORACK(qq, mm, cc, ee, xx) \
237 			(dlerrorack((qq), (mm), (cc), (ee), (xx)))
238 #define	DLOKACK(qq, mm, cc) \
239 			(dlokack((qq), (mm), (cc)))
240 #define	DLBINDACK(qq, mm, ss, aa, ll, xx, yy) \
241 			(dlbindack((qq), (mm), (ss), (aa), (ll), (xx), (yy)))
242 #define	DLPHYSADDRACK(qq, mm, aa, ll) \
243 			(dlphysaddrack((qq), (mm), (aa), (ll)))
244 #endif /* DEBUG */
245 
246 #define	IDNDL_ADDR_IS_MULTICAST(ap)	(((ap)->ether_addr_octet[0] & 01) == 1)
247 /*
248  * MIB II broadcast/multicast packets
249  */
250 #define	IS_BROADCAST(ehp) \
251 		(ether_cmp(&(ehp)->ether_dhost, &etherbroadcastaddr) == 0)
252 #define	IS_MULTICAST(ehp) \
253 		IDNDL_ADDR_IS_MULTICAST(&(ehp)->ether_dhost)
254 #define	BUMP_InNUcast(sip, ehp)					\
255 		if (IS_BROADCAST(ehp)) {			\
256 			(sip)->si_kstat.si_brdcstrcv++;		\
257 		} else if (IS_MULTICAST(ehp)) {			\
258 			(sip)->si_kstat.si_multircv++;		\
259 		}
260 #define	BUMP_OutNUcast(sip, ehp)				\
261 		if (IS_BROADCAST(ehp)) {			\
262 			(sip)->si_kstat.si_brdcstxmt++;		\
263 		} else if (IS_MULTICAST(ehp)) {			\
264 			(sip)->si_kstat.si_multixmt++;		\
265 		}
266 
267 /*
268  * Function prototypes.
269  */
270 static int	idndl_ioc_hdr_info(queue_t *, mblk_t *, int *);
271 static void	idndl_areq(queue_t *, mblk_t *);
272 static void	idndl_dreq(queue_t *, mblk_t *);
273 static void	idndl_breq(queue_t *, mblk_t *);
274 static void	idndl_ubreq(queue_t *, mblk_t *);
275 static void	idndl_ireq(queue_t *, mblk_t *);
276 static void	idndl_ponreq(queue_t *, mblk_t *);
277 static void	idndl_poffreq(queue_t *, mblk_t *);
278 static void	idndl_emreq(queue_t *, mblk_t *);
279 static void	idndl_dmreq(queue_t *, mblk_t *);
280 static void	idndl_pareq(queue_t *, mblk_t *);
281 #ifdef notdef
282 static void	idndl_spareq(queue_t *, mblk_t *);
283 #endif /* notdef */
284 static void	idndl_udreq(queue_t *, mblk_t *);
285 static void	serror(dev_info_t *dip, int idnerr, char *fmt, ...);
286 static mblk_t	*idndl_addudind(struct idn *, mblk_t *, struct ether_addr *,
287 				struct ether_addr *, int, ulong_t);
288 static void	idndl_setipq(struct idn *);
289 static int	idndl_mcmatch(struct idnstr *, struct ether_addr *);
290 static int	idndl_stat_kstat_update(kstat_t *ksp, int rw);
291 
292 static int		_idndl_ether2domain(struct ether_addr *eap);
293 static struct idn	*_idndl_ether2sip(struct ether_addr *eap);
294 
295 
296 #define	IDNSAPMATCH(sap, type, flags) ((sap == type)? 1 : \
297 	((flags & IDNSALLSAP)? 1 : \
298 	((sap <= ETHERMTU) && sap && (type <= ETHERMTU))? 1 : 0))
299 
300 /*
301  * Our DL_INFO_ACK template.
302  */
303 static	dl_info_ack_t idninfoack = {
304 	DL_INFO_ACK,			/* dl_primitive */
305 	0,				/* dl_max_sdu (see idndl_dlpi_init()) */
306 	0,				/* dl_min_sdu */
307 	IDNADDRL,			/* dl_addr_length */
308 	DL_ETHER, /* DL_OTHER, */	/* dl_mac_type */
309 	0,				/* dl_reserved */
310 	0,				/* dl_current_state */
311 	-2,				/* dl_sap_length */
312 	DL_CLDLS, /* DL_CODLS? */	/* dl_service_mode */
313 	0,				/* dl_qos_length */
314 	0,				/* dl_qos_offset */
315 	0,				/* dl_range_length */
316 	0,				/* dl_range_offset */
317 	DL_STYLE2,			/* dl_provider_style */
318 	sizeof (dl_info_ack_t),		/* dl_addr_offset */
319 	DL_VERSION_2,			/* dl_version */
320 	ETHERADDRL,			/* dl_brdcst_addr_length */
321 	sizeof (dl_info_ack_t) + IDNADDRL,	/* dl_brdcst_addr_offset */
322 	0				/* dl_growth */
323 };
324 
325 /*
326  * Ethernet broadcast address definition.
327  */
328 static struct ether_addr	etherbroadcastaddr = {
329 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
330 };
331 
332 /*
333  * --------------------------------------------------
334  */
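/*
 * Fill in the local domain's IDN ether address for this instance
 * (channel).
 */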
335 void
336 idndl_localetheraddr(struct idn *sip, struct ether_addr *eap)
337 {
338 	int		rv;
339 	int		instance;
340 	procname_t	proc = "idndl_localetheraddr";
341 
342 	ASSERT(sip && sip->si_dip && eap);
343 
344 	instance = ddi_get_instance(sip->si_dip);
345 
346 	PR_DLPI("%s: getting local etheraddr...\n", proc);
347 
348 	rv = idndl_domain_etheraddr(idn.localid, instance, eap);
349 	ASSERT(rv == 0);
350 }
351 
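/*
 * Construct the IDN ether address for the given domain and channel.
 * The address encodes the IDN cookie bytes, the domain's netid and
 * the channel number.  Returns -1 if the domain has no CPU assigned,
 * otherwise 0.
 */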
352 int
353 idndl_domain_etheraddr(int domid, int channel, struct ether_addr *eap)
354 {
355 	uchar_t		netid;
356 	procname_t	proc = "idndl_domain_etheraddr";
357 
358 	if (idn_domain[domid].dcpu == IDN_NIL_DCPU)
359 		return (-1);
360 
361 	netid = (uchar_t)idn_domain[domid].dnetid;
362 
363 	PR_DLPI("%s: dnetid = 0x%x, channel = 0x%x\n",
364 		proc, (uint_t)netid, channel);
365 
366 #ifdef notdef
367 	localetheraddr(NULL, eap);
368 
369 	PR_DLPI("%s: localetheraddr = %x:%x:%x:%x:%x:%x\n",
370 		proc, eap->ether_addr_octet[0], eap->ether_addr_octet[1],
371 		eap->ether_addr_octet[2], eap->ether_addr_octet[3],
372 		eap->ether_addr_octet[4], eap->ether_addr_octet[5]);
373 #endif /* notdef */
374 
375 	eap->ether_addr_octet[IDNETHER_ZERO] = 0;
376 	eap->ether_addr_octet[IDNETHER_COOKIE1] = IDNETHER_COOKIE1_VAL;
377 	eap->ether_addr_octet[IDNETHER_COOKIE2] = IDNETHER_COOKIE2_VAL;
378 	eap->ether_addr_octet[IDNETHER_NETID] = netid;
379 	eap->ether_addr_octet[IDNETHER_CHANNEL] = (uchar_t)channel;
380 	eap->ether_addr_octet[IDNETHER_RESERVED] = IDNETHER_RESERVED_VAL;
381 
382 	PR_DLPI("%s: domain %d: etheraddr = %x:%x:%x:%x:%x:%x\n",
383 		proc, domid,
384 		eap->ether_addr_octet[0], eap->ether_addr_octet[1],
385 		eap->ether_addr_octet[2], eap->ether_addr_octet[3],
386 		eap->ether_addr_octet[4], eap->ether_addr_octet[5]);
387 
388 	return (0);
389 }
390 
391 #ifdef DEBUG
392 /*
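 * Translate an IDN ether address into the domainid encoded in its
 * netid octet.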
393  */
394 static int
395 _idndl_ether2domain(struct ether_addr *eap)
396 {
397 	uchar_t	*eaop;
398 
399 	eaop = eap->ether_addr_octet;
400 
401 	ASSERT(IDNDL_ADDR_IS_MULTICAST(eap) ||
402 		((eaop[IDNETHER_COOKIE1] == IDNETHER_COOKIE1_VAL) &&
403 		    (eaop[IDNETHER_COOKIE2] == IDNETHER_COOKIE2_VAL)) ||
404 		((eaop[IDNETHER_COOKIE1] == 0xff) &&
405 		    (eaop[IDNETHER_COOKIE2] == 0xff)));
406 	/*
407 	 * Note that IDN_NIL_DOMID will be returned if the ether address
408 	 * is a broadcast address (0xff).
409 	 */
410 	return (IDN_NETID2DOMID(eaop[IDNETHER_NETID]));
411 }
412 
413 /*
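 * Translate an IDN ether address into the sip (struct idn) of the
 * instance named by its channel octet.  Returns NULL for multicast
 * or broadcast addresses.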
414  */
415 static struct idn *
416 _idndl_ether2sip(struct ether_addr *eap)
417 {
418 	int		instance;
419 	struct idn	*sip;
420 	uchar_t		*eaop;
421 	procname_t	proc = "_idndl_ether2sip";
422 
423 	eaop = eap->ether_addr_octet;
424 
425 	if (!IDNDL_ADDR_IS_MULTICAST(eap) &&
426 	    (((eaop[IDNETHER_COOKIE1] != IDNETHER_COOKIE1_VAL) ||
427 	    (eaop[IDNETHER_COOKIE2] != IDNETHER_COOKIE2_VAL)) &&
428 	    ((eaop[IDNETHER_COOKIE1] != 0xff) ||
429 		(eaop[IDNETHER_COOKIE2] != 0xff)))) {
430 
431 		cmn_err(CE_WARN,
432 			"IDN: 400: corrupted MAC header "
433 			"(exp %x or 0xffff, act 0x%x)",
434 			(IDNETHER_COOKIE1_VAL << 8) |
435 				IDNETHER_COOKIE2_VAL,
436 			(eaop[IDNETHER_COOKIE1] << 8) |
437 				eaop[IDNETHER_COOKIE2]);
438 
439 		return (NULL);
440 	}
441 
442 	if (IDNDL_ADDR_IS_MULTICAST(eap)) {
443 		PR_DLPI("%s: MULTICAST ADDR *** ERROR ***\n", proc);
444 		sip = NULL;
445 	} else if (eaop[IDNETHER_CHANNEL] == 0xff) {
446 		/*
447 		 * Received a broadcast.  Need to manually
448 		 * find the first running sip and use it.
449 		 * XXX - kind of kludgy - single threads broadcasts.
450 		 */
451 		PR_DLPI("%s: BROADCAST CHANNEL *** ERROR ***\n", proc);
452 		sip = NULL;
453 	} else {
454 		instance = (int)eaop[IDNETHER_CHANNEL];
455 
456 		sip = IDN_INST2SIP(instance);
457 	}
458 
459 	return (sip);
460 }
461 #endif /* DEBUG */
462 
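/*
 * Initialize the DL_INFO_ACK template's dl_max_sdu from the current
 * IDN_MTU.
 */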
463 void
464 idndl_dlpi_init()
465 {
466 	procname_t	proc = "idndl_dlpi_init";
467 
468 	PR_DLPI("%s: setting dl_max_sdu to %ld (0x%lx) bytes\n",
469 		proc, IDN_MTU, IDN_MTU);
470 	/*
471 	 * This field is dynamic because the user may
472 	 * want to dynamically set it _before_ an IDNnet
473 	 * has been established via ndd(1M).
474 	 */
475 	idninfoack.dl_max_sdu = IDN_MTU;
476 }
477 
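/*
 * kstat update routine.  On KSTAT_READ, copy the driver's soft
 * counters (si_kstat) into the named kstat entries; on KSTAT_WRITE,
 * zero si_kstat and load it from the supplied values.
 */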
478 static int
479 idndl_stat_kstat_update(kstat_t *ksp, int rw)
480 {
481 	struct idn	*sip;
482 	struct idn_kstat_named	*skp;
483 
484 	sip = (struct idn *)ksp->ks_private;
485 	skp = (struct idn_kstat_named *)ksp->ks_data;
486 
487 	if (rw == KSTAT_WRITE) {
488 #if 0
489 		bzero(&sg_kstat.gk_kstat, sizeof (sg_kstat.gk_kstat));
490 #endif /* 0 */
491 		bzero(&sip->si_kstat, sizeof (sip->si_kstat));
492 
493 		sip->si_kstat.si_ipackets 	= skp->sk_ipackets.value.ul;
494 		sip->si_kstat.si_ierrors	= skp->sk_ierrors.value.ul;
495 		sip->si_kstat.si_opackets 	= skp->sk_opackets.value.ul;
496 		sip->si_kstat.si_oerrors	= skp->sk_oerrors.value.ul;
497 		sip->si_kstat.si_txcoll		= skp->sk_txcoll.value.ul;
498 		sip->si_kstat.si_rxcoll		= skp->sk_rxcoll.value.ul;
499 		sip->si_kstat.si_crc		= skp->sk_crc.value.ul;
500 		sip->si_kstat.si_buff		= skp->sk_buff.value.ul;
501 		sip->si_kstat.si_nolink		= skp->sk_nolink.value.ul;
502 		sip->si_kstat.si_linkdown	= skp->sk_linkdown.value.ul;
503 		sip->si_kstat.si_inits		= skp->sk_inits.value.ul;
504 		sip->si_kstat.si_nocanput	= skp->sk_nocanput.value.ul;
505 		sip->si_kstat.si_allocbfail	= skp->sk_allocbfail.value.ul;
506 		sip->si_kstat.si_notbufs	= skp->sk_notbufs.value.ul;
507 		sip->si_kstat.si_reclaim	= skp->sk_reclaim.value.ul;
508 		sip->si_kstat.si_smraddr	= skp->sk_smraddr.value.ul;
509 		sip->si_kstat.si_txmax		= skp->sk_txmax.value.ul;
510 		sip->si_kstat.si_txfull		= skp->sk_txfull.value.ul;
511 		sip->si_kstat.si_xdcall		= skp->sk_xdcall.value.ul;
512 		sip->si_kstat.si_sigsvr		= skp->sk_sigsvr.value.ul;
513 		sip->si_kstat.si_mboxcrc	= skp->sk_mboxcrc.value.ul;
514 		/*
515 		 * MIB II kstat variables
516 		 */
517 		sip->si_kstat.si_rcvbytes	= skp->sk_rcvbytes.value.ul;
518 		sip->si_kstat.si_xmtbytes	= skp->sk_xmtbytes.value.ul;
519 		sip->si_kstat.si_multircv	= skp->sk_multircv.value.ul;
520 		sip->si_kstat.si_multixmt	= skp->sk_multixmt.value.ul;
521 		sip->si_kstat.si_brdcstrcv	= skp->sk_brdcstrcv.value.ul;
522 		sip->si_kstat.si_brdcstxmt	= skp->sk_brdcstxmt.value.ul;
523 		sip->si_kstat.si_norcvbuf	= skp->sk_norcvbuf.value.ul;
524 		sip->si_kstat.si_noxmtbuf	= skp->sk_noxmtbuf.value.ul;
525 		/*
526 		 * PSARC 1997/198 : 64bit kstats
527 		 */
528 		sip->si_kstat.si_ipackets64	= skp->sk_ipackets64.value.ull;
529 		sip->si_kstat.si_opackets64	= skp->sk_opackets64.value.ull;
530 		sip->si_kstat.si_rbytes64	= skp->sk_rbytes64.value.ull;
531 		sip->si_kstat.si_obytes64	= skp->sk_obytes64.value.ull;
532 		/*
533 		 * PSARC 1997/247 : RFC 1643
534 		 */
535 		sip->si_kstat.si_fcs_errors	= skp->sk_fcs_errors.value.ul;
536 		sip->si_kstat.si_macxmt_errors	=
537 						skp->sk_macxmt_errors.value.ul;
538 		sip->si_kstat.si_toolong_errors	=
539 						skp->sk_toolong_errors.value.ul;
540 		sip->si_kstat.si_macrcv_errors	=
541 						skp->sk_macrcv_errors.value.ul;
542 
543 		return (0);
544 	}
545 
546 	skp->sk_ipackets.value.ul 	= sip->si_kstat.si_ipackets;
547 	skp->sk_ierrors.value.ul	= sip->si_kstat.si_ierrors;
548 	skp->sk_opackets.value.ul	= sip->si_kstat.si_opackets;
549 	skp->sk_oerrors.value.ul	= sip->si_kstat.si_oerrors;
550 	skp->sk_txcoll.value.ul		= sip->si_kstat.si_txcoll;
551 	skp->sk_rxcoll.value.ul		= sip->si_kstat.si_rxcoll;
552 	skp->sk_crc.value.ul		= sip->si_kstat.si_crc;
553 	skp->sk_buff.value.ul		= sip->si_kstat.si_buff;
554 	skp->sk_nolink.value.ul		= sip->si_kstat.si_nolink;
555 	skp->sk_linkdown.value.ul	= sip->si_kstat.si_linkdown;
556 	skp->sk_inits.value.ul		= sip->si_kstat.si_inits;
557 	skp->sk_nocanput.value.ul	= sip->si_kstat.si_nocanput;
558 	skp->sk_allocbfail.value.ul	= sip->si_kstat.si_allocbfail;
559 	skp->sk_notbufs.value.ul	= sip->si_kstat.si_notbufs;
560 	skp->sk_reclaim.value.ul	= sip->si_kstat.si_reclaim;
561 	skp->sk_smraddr.value.ul	= sip->si_kstat.si_smraddr;
562 	skp->sk_txfull.value.ul		= sip->si_kstat.si_txfull;
563 	skp->sk_txmax.value.ul		= sip->si_kstat.si_txmax;
564 	skp->sk_xdcall.value.ul		= sip->si_kstat.si_xdcall;
565 	skp->sk_sigsvr.value.ul		= sip->si_kstat.si_sigsvr;
566 	skp->sk_mboxcrc.value.ul	= sip->si_kstat.si_mboxcrc;
567 	/*
568 	 * MIB II kstat variables
569 	 */
570 	skp->sk_rcvbytes.value.ul	= sip->si_kstat.si_rcvbytes;
571 	skp->sk_xmtbytes.value.ul	= sip->si_kstat.si_xmtbytes;
572 	skp->sk_multircv.value.ul	= sip->si_kstat.si_multircv;
573 	skp->sk_multixmt.value.ul	= sip->si_kstat.si_multixmt;
574 	skp->sk_brdcstrcv.value.ul	= sip->si_kstat.si_brdcstrcv;
575 	skp->sk_brdcstxmt.value.ul	= sip->si_kstat.si_brdcstxmt;
576 	skp->sk_norcvbuf.value.ul	= sip->si_kstat.si_norcvbuf;
577 	skp->sk_noxmtbuf.value.ul	= sip->si_kstat.si_noxmtbuf;
578 	/*
579 	 * PSARC 1997/198 : 64bit kstats
580 	 */
581 	skp->sk_ipackets64.value.ull	= sip->si_kstat.si_ipackets64;
582 	skp->sk_opackets64.value.ull	= sip->si_kstat.si_opackets64;
583 	skp->sk_rbytes64.value.ull	= sip->si_kstat.si_rbytes64;
584 	skp->sk_obytes64.value.ull	= sip->si_kstat.si_obytes64;
585 	/*
586 	 * PSARC 1997/247 : RFC 1643
587 	 */
588 	skp->sk_fcs_errors.value.ul	= sip->si_kstat.si_fcs_errors;
589 	skp->sk_macxmt_errors.value.ul	= sip->si_kstat.si_macxmt_errors;
590 	skp->sk_toolong_errors.value.ul	= sip->si_kstat.si_toolong_errors;
591 	skp->sk_macrcv_errors.value.ul	= sip->si_kstat.si_macrcv_errors;
592 
593 	return (0);
594 }
595 
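/*
 * Create and install the "net" named kstats for this instance.
 */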
596 void
597 idndl_statinit(struct idn *sip)
598 {
599 	struct	kstat		*ksp;
600 	struct	idn_kstat_named	*skp;
601 
602 #ifdef	kstat
603 	if ((ksp = kstat_create(IDNNAME, ddi_get_instance(sip->si_dip),
604 		NULL, "net", KSTAT_TYPE_NAMED,
605 		sizeof (struct idn_kstat_named) / sizeof (kstat_named_t),
606 		KSTAT_FLAG_PERSISTENT)) == NULL) {
607 #else
608 	if ((ksp = kstat_create(IDNNAME, ddi_get_instance(sip->si_dip),
609 		NULL, "net", KSTAT_TYPE_NAMED,
610 		sizeof (struct idn_kstat_named) /
611 		sizeof (kstat_named_t), 0)) == NULL) {
612 #endif	/* kstat */
613 		serror(sip->si_dip, 450, "kstat_create failed");
614 		return;
615 	}
616 
617 	sip->si_ksp = ksp;
618 	skp = (struct idn_kstat_named *)(ksp->ks_data);
619 	kstat_named_init(&skp->sk_ipackets,		"ipackets",
620 		KSTAT_DATA_ULONG);
621 	kstat_named_init(&skp->sk_ierrors,		"ierrors",
622 		KSTAT_DATA_ULONG);
623 	kstat_named_init(&skp->sk_opackets,		"opackets",
624 		KSTAT_DATA_ULONG);
625 	kstat_named_init(&skp->sk_oerrors,		"oerrors",
626 		KSTAT_DATA_ULONG);
627 	kstat_named_init(&skp->sk_txcoll,		"collisions",
628 		KSTAT_DATA_ULONG);
629 	kstat_named_init(&skp->sk_rxcoll,		"rx_collisions",
630 		KSTAT_DATA_ULONG);
631 	kstat_named_init(&skp->sk_crc,			"crc",
632 		KSTAT_DATA_ULONG);
633 	kstat_named_init(&skp->sk_buff,			"buff",
634 		KSTAT_DATA_ULONG);
635 	kstat_named_init(&skp->sk_nolink,		"nolink",
636 		KSTAT_DATA_ULONG);
637 	kstat_named_init(&skp->sk_linkdown,		"linkdown",
638 		KSTAT_DATA_ULONG);
639 	kstat_named_init(&skp->sk_inits,		"inits",
640 		KSTAT_DATA_ULONG);
641 	kstat_named_init(&skp->sk_nocanput,		"nocanput",
642 		KSTAT_DATA_ULONG);
643 	kstat_named_init(&skp->sk_allocbfail,		"allocbfail",
644 		KSTAT_DATA_ULONG);
645 	kstat_named_init(&skp->sk_notbufs,		"notbufs",
646 		KSTAT_DATA_ULONG);
647 	kstat_named_init(&skp->sk_reclaim,		"reclaim",
648 		KSTAT_DATA_ULONG);
649 	kstat_named_init(&skp->sk_smraddr,		"smraddr",
650 		KSTAT_DATA_ULONG);
651 	kstat_named_init(&skp->sk_txmax,		"txmax",
652 		KSTAT_DATA_ULONG);
653 	kstat_named_init(&skp->sk_txfull,		"txfull",
654 		KSTAT_DATA_ULONG);
655 	kstat_named_init(&skp->sk_xdcall,		"xdcall",
656 		KSTAT_DATA_ULONG);
657 	kstat_named_init(&skp->sk_sigsvr,		"sigsvr",
658 		KSTAT_DATA_ULONG);
659 	kstat_named_init(&skp->sk_mboxcrc,		"mboxcrc",
660 		KSTAT_DATA_ULONG);
661 	/*
662 	 * MIB II kstat variables
663 	 */
664 	kstat_named_init(&skp->sk_rcvbytes,		"rbytes",
665 		KSTAT_DATA_ULONG);
666 	kstat_named_init(&skp->sk_xmtbytes,		"obytes",
667 		KSTAT_DATA_ULONG);
668 	kstat_named_init(&skp->sk_multircv,		"multircv",
669 		KSTAT_DATA_ULONG);
670 	kstat_named_init(&skp->sk_multixmt,		"multixmt",
671 		KSTAT_DATA_ULONG);
672 	kstat_named_init(&skp->sk_brdcstrcv,		"brdcstrcv",
673 		KSTAT_DATA_ULONG);
674 	kstat_named_init(&skp->sk_brdcstxmt,		"brdcstxmt",
675 		KSTAT_DATA_ULONG);
676 	kstat_named_init(&skp->sk_norcvbuf,		"norcvbuf",
677 		KSTAT_DATA_ULONG);
678 	kstat_named_init(&skp->sk_noxmtbuf,		"noxmtbuf",
679 		KSTAT_DATA_ULONG);
680 	/*
681 	 * PSARC 1997/198 : 64bit kstats
682 	 */
683 	kstat_named_init(&skp->sk_ipackets64,		"ipackets64",
684 		KSTAT_DATA_ULONGLONG);
685 	kstat_named_init(&skp->sk_opackets64,		"opackets64",
686 		KSTAT_DATA_ULONGLONG);
687 	kstat_named_init(&skp->sk_rbytes64,		"rbytes64",
688 		KSTAT_DATA_ULONGLONG);
689 	kstat_named_init(&skp->sk_obytes64,		"obytes64",
690 		KSTAT_DATA_ULONGLONG);
691 	/*
692 	 * PSARC 1997/247 : RFC 1643
693 	 */
694 	kstat_named_init(&skp->sk_fcs_errors,		"fcs_errors",
695 		KSTAT_DATA_ULONG);
696 	kstat_named_init(&skp->sk_macxmt_errors,	"macxmt_errors",
697 		KSTAT_DATA_ULONG);
698 	kstat_named_init(&skp->sk_toolong_errors,	"toolong_errors",
699 		KSTAT_DATA_ULONG);
700 	kstat_named_init(&skp->sk_macrcv_errors,	"macrcv_errors",
701 		KSTAT_DATA_ULONG);
702 
703 	ksp->ks_update = idndl_stat_kstat_update;
704 	ksp->ks_private = (void *)sip;
705 	kstat_install(ksp);
706 }
707 
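/*
 * DLPI request dispatcher.  Decode the primitive at the front of the
 * message and hand it to the appropriate handler while holding the
 * stream's rwlock as writer.
 */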
708 void
709 idndl_proto(queue_t *wq, mblk_t *mp)
710 {
711 	union DL_primitives	*dlp;
712 	struct idnstr		*stp;
713 	t_uscalar_t		prim;
714 	procname_t		proc = "idndl_proto";
715 
716 	stp = (struct idnstr *)wq->q_ptr;
717 	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
718 		/*
719 		 * Gotta at least have enough room to hold
720 		 * the primitive!
721 		 */
722 		DLERRORACK(wq, mp, -1, DL_BADPRIM, 0);
723 		return;
724 	}
725 	dlp = (union DL_primitives *)mp->b_rptr;
726 	prim = dlp->dl_primitive;
727 
728 	TRACE_2(TR_FAC_IDN, TR_IDN_PROTO_START,
729 		"idndl_proto start:  wq %p dlprim %X", wq, prim);
730 
731 #ifdef DEBUG
732 	PR_DLPI("%s: stp = 0x%p, wq = 0x%p, dlprim = 0x%x(%s)\n",
733 		proc, (void *)stp, (void *)wq, prim, dlprim2str(prim));
734 #endif /* DEBUG */
735 
736 	rw_enter(&stp->ss_rwlock, RW_WRITER);
737 
738 	switch (prim) {
739 	case DL_UNITDATA_REQ:
740 		idndl_udreq(wq, mp);
741 		break;
742 
743 	case DL_ATTACH_REQ:
744 		idndl_areq(wq, mp);
745 		break;
746 
747 	case DL_DETACH_REQ:
748 		idndl_dreq(wq, mp);
749 		break;
750 
751 	case DL_BIND_REQ:
752 		idndl_breq(wq, mp);
753 		break;
754 
755 	case DL_UNBIND_REQ:
756 		idndl_ubreq(wq, mp);
757 		break;
758 
759 	case DL_INFO_REQ:
760 		idndl_ireq(wq, mp);
761 		break;
762 
763 	case DL_PROMISCON_REQ:
764 		idndl_ponreq(wq, mp);
765 		break;
766 
767 	case DL_PROMISCOFF_REQ:
768 		idndl_poffreq(wq, mp);
769 		break;
770 
771 	case DL_ENABMULTI_REQ:
772 		idndl_emreq(wq, mp);
773 		break;
774 
775 	case DL_DISABMULTI_REQ:
776 		idndl_dmreq(wq, mp);
777 		break;
778 
779 	case DL_PHYS_ADDR_REQ:
780 		idndl_pareq(wq, mp);
781 		break;
782 
783 #ifdef notdef
784 	/*
785 	 * We cannot allow this in IDN-land because we
786 	 * rely on the ethernet (physical) address to determine
787 	 * where to target the message.  Recall that unlike
788 	 * ethernet we simply cannot dump junk on the wire and
789 	 * expect it to automatically find its destination.
790 	 * In the IDN we need to target the destination.
791 	 * Note that if we used POINT-TO-POINT then we wouldn't
792 	 * have to worry about the physical address since each
793 	 * domain connection would have a separate queue.
794 	 * However, ptp then requires multiple interfaces at
795 	 * the appl level as opposed to a single one for all
796 	 * of idn.  We opt for the simpler single interface (idn0).
797 	 */
798 	case DL_SET_PHYS_ADDR_REQ:
799 		idndl_spareq(wq, mp);
800 		break;
801 #endif /* notdef */
802 
803 	default:
804 		DLERRORACK(wq, mp, prim, DL_UNSUPPORTED, 0);
805 		break;
806 	}
807 
808 	TRACE_2(TR_FAC_IDN, TR_IDN_PROTO_END,
809 		"idnproto end:  wq %p dlprim %X", wq, prim);
810 
811 	rw_exit(&stp->ss_rwlock);
812 }
813 
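/*
 * Handle the DLPI-related M_IOCTL commands: DLIOCRAW (raw M_DATA
 * mode) and DL_IOC_HDR_INFO (fastpath).  Returns 0 or an errno;
 * *argsizep is set to the size of the DL_IOC_HDR_INFO reply data,
 * zero otherwise.
 */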
814 int
815 idnioc_dlpi(queue_t *wq, mblk_t *mp, int *argsizep)
816 {
817 	int	rv = 0;
818 	struct	iocblk	*iocp = (struct iocblk *)mp->b_rptr;
819 	struct	idnstr	*stp  = (struct idnstr *)wq->q_ptr;
820 	procname_t	proc = "idnioc_dlpi";
821 
822 	*argsizep = 0;
823 
824 	switch (iocp->ioc_cmd) {
825 	case DLIOCRAW:			/* raw M_DATA mode */
826 		PR_DLPI("%s: cmd = DLIOCRAW\n", proc);
827 		stp->ss_flags |= IDNSRAW;
828 		break;
829 
830 	case DL_IOC_HDR_INFO:		/* M_DATA "fastpath" info request */
831 		PR_DLPI("%s: cmd = DL_IOC_HDR_INFO\n", proc);
832 		rv = idndl_ioc_hdr_info(wq, mp, argsizep);
833 		break;
834 
835 	default:
836 		PR_DLPI("%s: invalid cmd 0x%x\n", proc, iocp->ioc_cmd);
837 		rv = EINVAL;
838 		break;
839 	}
840 	return (rv);
841 }
842 
843 /*
844  * M_DATA "fastpath" info request.
845  * Following the M_IOCTL mblk should come a DL_UNITDATA_REQ mblk.
846  * We ack with an M_IOCACK pointing to the original DL_UNITDATA_REQ mblk
847  * followed by an mblk containing the raw ethernet header corresponding
848  * to the destination address.  Subsequently, we may receive M_DATA
849  * msgs which start with this header and may send up
850  * M_DATA msgs with b_rptr pointing to a (ulong) group address
851  * indicator followed by the network-layer data (IP packet header).
852  * This is all selectable on a per-Stream basis.
853  */
854 static int
855 idndl_ioc_hdr_info(queue_t *wq, mblk_t *mp, int *argsizep)
856 {
857 	mblk_t			*nmp;
858 	struct idnstr		*stp;
859 	struct idndladdr	*dlap;
860 	dl_unitdata_req_t	*dludp;
861 	struct ether_header	*headerp;
862 	struct idn		*sip;
863 	int	off, len;
864 	int	padding = 0;
865 	int	error;
866 	procname_t		proc = "idndl_ioc_hdr_info";
867 
868 	stp = (struct idnstr *)wq->q_ptr;
869 	sip = stp->ss_sip;
870 	if (sip == NULL) {
871 		PR_DLPI("%s: NULL sip (ret EINVAL)\n", proc);
872 		return (EINVAL);
873 	}
874 
875 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + IDNADDRL);
876 	if (error != 0) {
877 		PR_DLPI("%s: sanity error (ret %d)\n", proc, error);
878 		return (error);
879 	}
880 
881 	/*
882 	 * Sanity check the DL_UNITDATA_REQ destination address
883 	 * offset and length values.
884 	 */
885 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
886 	off = dludp->dl_dest_addr_offset;
887 	len = dludp->dl_dest_addr_length;
888 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
889 	    !MBLKIN(mp->b_cont, off, len) || len != IDNADDRL) {
890 		PR_DLPI("%s: off(0x%x)/len(%d) error (ret EINVAL)\n",
891 		    proc, off, len);
892 		return (EINVAL);
893 	}
894 
895 	dlap = (struct idndladdr *)(mp->b_cont->b_rptr + off);
896 
897 	/*
898 	 * Allocate a new mblk to hold the ether header.
899 	 */
900 	nmp = allocb(sizeof (struct ether_header) + padding, BPRI_MED);
901 	if (nmp == NULL) {
902 		IDN_KSTAT_INC(sip, si_allocbfail);
903 		return (ENOMEM);
904 	}
905 	nmp->b_rptr += padding;
906 	nmp->b_wptr = nmp->b_rptr + sizeof (struct ether_header);
907 
908 	/*
909 	 * Fill in the ether header.
910 	 */
911 	headerp = (struct ether_header *)nmp->b_rptr;
912 	ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
913 	ether_copy(&sip->si_ouraddr, &headerp->ether_shost);
914 	headerp->ether_type = dlap->dl_sap;
915 
916 	/*
917 	 * Link new mblk in after the "request" mblks.
918 	 */
919 	linkb(mp, nmp);
920 
921 	stp->ss_flags |= IDNSFAST;
922 
923 	/*
924 	 * XXX Don't bother calling idndl_setipq() here.
925 	 */
926 
927 	if (argsizep)
928 		*argsizep = msgsize(mp->b_cont);
929 
930 	return (0);
931 }
932 
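/*
 * DL_ATTACH_REQ: associate the stream with the idn instance named
 * by the requested PPA, initializing the device if it is not
 * already running, and move the stream to DL_UNBOUND.
 */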
933 static void
934 idndl_areq(queue_t *wq, mblk_t *mp)
935 {
936 	struct idnstr		*stp;
937 	union DL_primitives	*dlp;
938 	struct idn		*sip;
939 	int	ppa;
940 	procname_t	proc = "idndl_areq";
941 
942 	stp = (struct idnstr *)wq->q_ptr;
943 	dlp = (union DL_primitives *)mp->b_rptr;
944 
945 	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
946 		DLERRORACK(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);
947 		return;
948 	}
949 
950 	if (stp->ss_state != DL_UNATTACHED) {
951 		DLERRORACK(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);
952 		return;
953 	}
954 
955 	ppa = dlp->attach_req.dl_ppa;
956 
957 	/*
958 	 * Valid ppa?
959 	 */
960 	if (ppa == -1 || qassociate(wq, ppa) != 0) {
961 		PR_DLPI("%s: bad ppa %d\n", proc, ppa);
962 		DLERRORACK(wq, mp, dlp->dl_primitive, DL_BADPPA, 0);
963 		return;
964 	}
965 	mutex_enter(&idn.siplock);
966 	for (sip = idn.sip; sip; sip = sip->si_nextp) {
967 		if (ppa == ddi_get_instance(sip->si_dip))
968 			break;
969 	}
970 	mutex_exit(&idn.siplock);
971 	ASSERT(sip != NULL);	/* qassociate() succeeded */
972 
973 	/*
974 	 * Has device been initialized?  Do so if necessary.
975 	 */
976 	if ((sip->si_flags & IDNRUNNING) == 0) {
977 		if (idndl_init(sip)) {
978 			DLERRORACK(wq, mp, dlp->dl_primitive,
979 					DL_INITFAILED, 0);
980 			(void) qassociate(wq, -1);
981 			return;
982 		}
983 	}
984 
985 	/*
986 	 * Set link to device and update our state.
987 	 */
988 	stp->ss_sip = sip;
989 	stp->ss_state = DL_UNBOUND;
990 
991 	DLOKACK(wq, mp, DL_ATTACH_REQ);
992 }
993 
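/*
 * DL_DETACH_REQ: detach the stream from its idn instance and return
 * it to DL_UNATTACHED.
 */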
994 static void
995 idndl_dreq(queue_t *wq, mblk_t *mp)
996 {
997 	struct idnstr	*stp;
998 
999 	stp = (struct idnstr *)wq->q_ptr;
1000 
1001 	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
1002 		DLERRORACK(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
1003 		return;
1004 	}
1005 
1006 	if (stp->ss_state != DL_UNBOUND) {
1007 		DLERRORACK(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
1008 		return;
1009 	}
1010 
1011 	idndl_dodetach(stp);
1012 	(void) qassociate(wq, -1);
1013 	DLOKACK(wq, mp, DL_DETACH_REQ);
1014 }
1015 
1016 /*
1017  * Detach a Stream from an interface.
1018  */
1019 void
1020 idndl_dodetach(struct idnstr *stp)
1021 {
1022 	struct idnstr	*tstp;
1023 	struct idn	*sip;
1024 	int		reinit = 0;
1025 
1026 	ASSERT(stp->ss_sip);
1027 
1028 	sip = stp->ss_sip;
1029 	stp->ss_sip = NULL;
1030 
1031 	/*
1032 	 * Disable promiscuous mode if on.
1033 	 */
1034 	if (stp->ss_flags & IDNSALLPHYS) {
1035 		stp->ss_flags &= ~IDNSALLPHYS;
1036 		reinit = 1;
1037 	}
1038 
1039 	/*
1040 	 * Disable ALLMULTI mode if on.
1041 	 */
1042 	if (stp->ss_flags & IDNSALLMULTI) {
1043 		stp->ss_flags &= ~IDNSALLMULTI;
1044 		reinit = 1;
1045 	}
1046 
1047 	/*
1048 	 * Disable any Multicast Addresses.
1049 	 */
1050 	stp->ss_mccount = 0;
1051 	if (stp->ss_mctab) {
1052 		kmem_free(stp->ss_mctab, IDNMCALLOC);
1053 		stp->ss_mctab = NULL;
1054 		reinit = 1;
1055 	}
1056 
1057 	/*
1058 	 * Detach from device structure.
1059 	 * Uninit the device when no other streams are attached to it.
1060 	 */
1061 	rw_enter(&idn.struprwlock, RW_READER);
1062 	for (tstp = idn.strup; tstp; tstp = tstp->ss_nextp)
1063 		if (tstp->ss_sip == sip)
1064 			break;
1065 	rw_exit(&idn.struprwlock);
1066 
1067 	if (tstp == NULL)
1068 		idndl_uninit(sip);
1069 	else if (reinit)
1070 		(void) idndl_init(sip);
1071 
1072 	stp->ss_state = DL_UNATTACHED;
1073 
1074 	idndl_setipq(sip);
1075 }
1076 
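/*
 * DL_BIND_REQ: bind the stream to the requested SAP.  For IP SAPs
 * this also opens the IDN channel of the attached instance.
 */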
1077 static void
1078 idndl_breq(queue_t *wq, mblk_t *mp)
1079 {
1080 	struct idnstr		*stp;
1081 	union DL_primitives	*dlp;
1082 	struct idn		*sip;
1083 	struct idndladdr	idnaddr;
1084 	t_uscalar_t		sap;
1085 	int		xidtest;
1086 	procname_t	proc = "idndl_breq";
1087 
1088 	stp = (struct idnstr *)wq->q_ptr;
1089 
1090 	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
1091 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
1092 		return;
1093 	}
1094 
1095 	if (stp->ss_state != DL_UNBOUND) {
1096 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
1097 		return;
1098 	}
1099 
1100 	dlp = (union DL_primitives *)mp->b_rptr;
1101 
1102 	if (dlp->bind_req.dl_service_mode != idninfoack.dl_service_mode) {
1103 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_UNSUPPORTED, 0);
1104 		return;
1105 	}
1106 
1107 	sip = stp->ss_sip;
1108 	sap = dlp->bind_req.dl_sap;
1109 	xidtest = dlp->bind_req.dl_xidtest_flg;
1110 
1111 	ASSERT(sip);
1112 
1113 	if (xidtest) {
1114 		DLERRORACK(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
1115 		return;
1116 	}
1117 
1118 	if (sap > ETHERTYPE_MAX) {
1119 		DLERRORACK(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);
1120 		return;
1121 	}
1122 
1123 	/*
1124 	 * Save SAP value for this Stream and change state.
1125 	 */
1126 	stp->ss_sap = sap;
1127 	stp->ss_state = DL_IDLE;
1128 
1129 	idnaddr.dl_sap = sap;
1130 	ether_copy(&sip->si_ouraddr, &idnaddr.dl_phys);
1131 
1132 	if (IS_ETHERTYPE_IP(sap)) {
1133 		int	channel;
1134 
1135 		channel =
1136 			(int)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1137 		PR_DLPI("%s: IP SAP, opening channel %d\n", proc, channel);
1138 		if (idn_open_channel(channel)) {
1139 			PR_DLPI("%s: FAILED TO OPEN CHANNEL %d\n",
1140 				proc, channel);
1141 			DLERRORACK(wq, mp, dlp->dl_primitive, DL_NOADDR, 0);
1142 			return;
1143 		}
1144 	}
1145 	DLBINDACK(wq, mp, sap, &idnaddr, IDNADDRL, 0, 0);
1146 
1147 	idndl_setipq(sip);
1148 }
1149 
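/*
 * DL_UNBIND_REQ: unbind the stream from its SAP.  For IP SAPs the
 * IDN channel is soft-closed (see comment below).
 */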
1150 static void
1151 idndl_ubreq(queue_t *wq, mblk_t *mp)
1152 {
1153 	struct idnstr	*stp;
1154 	procname_t	proc = "idndl_ubreq";
1155 
1156 	stp = (struct idnstr *)wq->q_ptr;
1157 
1158 	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
1159 		DLERRORACK(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
1160 		return;
1161 	}
1162 
1163 	if (stp->ss_state != DL_IDLE) {
1164 		DLERRORACK(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
1165 		return;
1166 	}
1167 
1168 	stp->ss_state = DL_UNBOUND;
1169 
1170 	if (IS_ETHERTYPE_IP(stp->ss_sap)) {
1171 		struct idn	*sip;
1172 		int		channel;
1173 
1174 		sip = stp->ss_sip;
1175 		channel =
1176 			(int)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1177 		PR_DLPI("%s: IP SAP, unbinding channel %d\n", proc, channel);
1178 		/*
1179 		 * We need to do a "soft" close since there's a
1180 		 * possibility that we've been called by one of the
1181 		 * IDN data server/dispatcher threads!  We'll deadlock
1182 		 * if we attempt a "hard" close of the channel from here.
1183 		 */
1184 		idn_close_channel(channel, IDNCHAN_SOFT_CLOSE);
1185 	}
1186 
1187 	stp->ss_sap = 0;
1188 
1189 	DLOKACK(wq, mp, DL_UNBIND_REQ);
1190 
1191 	idndl_setipq(stp->ss_sip);
1192 }
1193 
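/*
 * DL_INFO_REQ: reply with a DL_INFO_ACK built from the idninfoack
 * template and the stream's current state, SAP and addresses.
 */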
1194 static void
1195 idndl_ireq(queue_t *wq, mblk_t *mp)
1196 {
1197 	struct idnstr		*stp;
1198 	dl_info_ack_t		*dlip;
1199 	struct idndladdr	*dlap;
1200 	struct ether_addr	*ep;
1201 	int	size;
1202 
1203 	stp = (struct idnstr *)wq->q_ptr;
1204 
1205 	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
1206 		DLERRORACK(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
1207 		return;
1208 	}
1209 
1210 	/*
1211 	 * Exchange current msg for a DL_INFO_ACK.
1212 	 */
1213 	size = sizeof (dl_info_ack_t) + IDNADDRL + ETHERADDRL;
1214 	if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL)
1215 		return;
1216 
1217 	/*
1218 	 * Fill in the DL_INFO_ACK fields and reply.
1219 	 */
1220 	dlip = (dl_info_ack_t *)mp->b_rptr;
1221 	ASSERT(idninfoack.dl_max_sdu);
1222 	*dlip = idninfoack;
1223 	dlip->dl_current_state = stp->ss_state;
1224 	dlap = (struct idndladdr *)(mp->b_rptr + dlip->dl_addr_offset);
1225 	dlap->dl_sap = stp->ss_sap;
1226 	if (stp->ss_sip) {
1227 		ether_copy(&stp->ss_sip->si_ouraddr, &dlap->dl_phys);
1228 	} else {
1229 		bzero(&dlap->dl_phys, ETHERADDRL);
1230 	}
1231 	ep = (struct ether_addr *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
1232 	ether_copy(&etherbroadcastaddr, ep);
1233 
1234 	qreply(wq, mp);
1235 }
1236 
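/*
 * DL_PROMISCON_REQ: enable the requested promiscuous level (phys,
 * SAP or multicast) for this stream and reinitialize the device.
 */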
1237 static void
1238 idndl_ponreq(queue_t *wq, mblk_t *mp)
1239 {
1240 	struct idnstr	*stp;
1241 
1242 	stp = (struct idnstr *)wq->q_ptr;
1243 
1244 	if (MBLKL(mp) < DL_PROMISCON_REQ_SIZE) {
1245 		DLERRORACK(wq, mp, DL_PROMISCON_REQ, DL_BADPRIM, 0);
1246 		return;
1247 	}
1248 
1249 	switch (((dl_promiscon_req_t *)mp->b_rptr)->dl_level) {
1250 	case DL_PROMISC_PHYS:
1251 		stp->ss_flags |= IDNSALLPHYS;
1252 		break;
1253 
1254 	case DL_PROMISC_SAP:
1255 		stp->ss_flags |= IDNSALLSAP;
1256 		break;
1257 
1258 	case DL_PROMISC_MULTI:
1259 		stp->ss_flags |= IDNSALLMULTI;
1260 		break;
1261 
1262 	default:
1263 		DLERRORACK(wq, mp, DL_PROMISCON_REQ, DL_NOTSUPPORTED, 0);
1264 		return;
1265 	}
1266 
1267 	if (stp->ss_sip)
1268 		(void) idndl_init(stp->ss_sip);
1269 
1270 	if (stp->ss_sip)
1271 		idndl_setipq(stp->ss_sip);
1272 
1273 	DLOKACK(wq, mp, DL_PROMISCON_REQ);
1274 }
1275 
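/*
 * DL_PROMISCOFF_REQ: disable the requested promiscuous level for
 * this stream.
 */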
1276 static void
1277 idndl_poffreq(queue_t *wq, mblk_t *mp)
1278 {
1279 	struct idnstr	*stp;
1280 	int		flag;
1281 
1282 	stp = (struct idnstr *)wq->q_ptr;
1283 
1284 	if (MBLKL(mp) < DL_PROMISCOFF_REQ_SIZE) {
1285 		DLERRORACK(wq, mp, DL_PROMISCOFF_REQ, DL_BADPRIM, 0);
1286 		return;
1287 	}
1288 
1289 	switch (((dl_promiscoff_req_t *)mp->b_rptr)->dl_level) {
1290 	case DL_PROMISC_PHYS:
1291 		flag = IDNSALLPHYS;
1292 		break;
1293 
1294 	case DL_PROMISC_SAP:
1295 		flag = IDNSALLSAP;
1296 		break;
1297 
1298 	case DL_PROMISC_MULTI:
1299 		flag = IDNSALLMULTI;
1300 		break;
1301 
1302 	default:
1303 		DLERRORACK(wq, mp, DL_PROMISCOFF_REQ, DL_NOTSUPPORTED, 0);
1304 		return;
1305 	}
1306 
1307 	if ((stp->ss_flags & flag) == 0) {
1308 		DLERRORACK(wq, mp, DL_PROMISCOFF_REQ, DL_NOTENAB, 0);
1309 		return;
1310 	}
1311 
1312 	stp->ss_flags &= ~flag;
1313 
1314 	if (stp->ss_sip)
1315 		(void) idndl_init(stp->ss_sip);
1316 
1317 	if (stp->ss_sip)
1318 		idndl_setipq(stp->ss_sip);
1319 
1320 	DLOKACK(wq, mp, DL_PROMISCOFF_REQ);
1321 }
1322 
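/*
 * DL_ENABMULTI_REQ: add a multicast address to the stream's
 * multicast table.
 */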
1323 static void
1324 idndl_emreq(queue_t *wq, mblk_t *mp)
1325 {
1326 	struct idnstr		*stp;
1327 	union DL_primitives	*dlp;
1328 	struct ether_addr	*addrp;
1329 	int	off;
1330 	int	len;
1331 	int	i;
1332 
1333 	stp = (struct idnstr *)wq->q_ptr;
1334 
1335 	if (MBLKL(mp) < DL_ENABMULTI_REQ_SIZE) {
1336 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_BADPRIM, 0);
1337 		return;
1338 	}
1339 
1340 	if (stp->ss_state == DL_UNATTACHED) {
1341 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_OUTSTATE, 0);
1342 		return;
1343 	}
1344 
1345 	dlp = (union DL_primitives *)mp->b_rptr;
1346 	len = dlp->enabmulti_req.dl_addr_length;
1347 	off = dlp->enabmulti_req.dl_addr_offset;
1348 	addrp = (struct ether_addr *)(mp->b_rptr + off);
1349 
1350 	if ((len != ETHERADDRL) ||
1351 		!MBLKIN(mp, off, len) ||
1352 		!IDNDL_ADDR_IS_MULTICAST(addrp)) {
1353 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_BADADDR, 0);
1354 		return;
1355 	}
1356 
1357 	if ((stp->ss_mccount + 1) >= IDNMAXMC) {
1358 		DLERRORACK(wq, mp, DL_ENABMULTI_REQ, DL_TOOMANY, 0);
1359 		return;
1360 	}
1361 
1362 	/*
1363 	 * Allocate table on first request.
1364 	 */
1365 	if (stp->ss_mctab == NULL)
1366 		stp->ss_mctab = kmem_alloc(IDNMCALLOC, KM_SLEEP);
1367 
1368 	/*
1369 	 * Check to see if the address is already in the table.
1370 	 * Bug 1209733:
1371 	 * If present in the table, add the entry to the end of the table
1372 	 * and return without initializing the hardware.
1373 	 */
1374 	for (i = 0; i < stp->ss_mccount; i++) {
1375 		if (ether_cmp(&stp->ss_mctab[i], addrp) == 0) {
1376 			stp->ss_mctab[stp->ss_mccount++] = *addrp;
1377 			DLOKACK(wq, mp, DL_ENABMULTI_REQ);
1378 			return;
1379 		}
1380 	}
1381 
1382 	stp->ss_mctab[stp->ss_mccount++] = *addrp;
1383 
1384 	(void) idndl_init(stp->ss_sip);
1385 
1386 	DLOKACK(wq, mp, DL_ENABMULTI_REQ);
1387 }
1388 
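/*
 * DL_DISABMULTI_REQ: remove a multicast address from the stream's
 * multicast table.
 */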
1389 static void
1390 idndl_dmreq(queue_t *wq, mblk_t *mp)
1391 {
1392 	struct idnstr		*stp;
1393 	union DL_primitives	*dlp;
1394 	struct ether_addr	*addrp;
1395 	int	off;
1396 	int	len;
1397 	int	i;
1398 
1399 	stp = (struct idnstr *)wq->q_ptr;
1400 
1401 	if (MBLKL(mp) < DL_DISABMULTI_REQ_SIZE) {
1402 		DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_BADPRIM, 0);
1403 		return;
1404 	}
1405 
1406 	if (stp->ss_state == DL_UNATTACHED) {
1407 		DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_OUTSTATE, 0);
1408 		return;
1409 	}
1410 
1411 	dlp = (union DL_primitives *)mp->b_rptr;
1412 	len = dlp->disabmulti_req.dl_addr_length;
1413 	off = dlp->disabmulti_req.dl_addr_offset;
1414 	addrp = (struct ether_addr *)(mp->b_rptr + off);
1415 
1416 	if ((len != ETHERADDRL) || !MBLKIN(mp, off, len)) {
1417 		DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_BADADDR, 0);
1418 		return;
1419 	}
1420 
1421 	/*
1422 	 * Find the address in the multicast table for this Stream
1423 	 * and delete it by shifting all subsequent multicast
1424 	 * table entries over one.
1425 	 */
1426 	for (i = 0; i < stp->ss_mccount; i++)
1427 		if (ether_cmp(addrp, &stp->ss_mctab[i]) == 0) {
1428 			bcopy(&stp->ss_mctab[i+1],
1429 				&stp->ss_mctab[i],
1430 				((stp->ss_mccount - i) *
1431 				sizeof (struct ether_addr)));
1432 			stp->ss_mccount--;
1433 			(void) idndl_init(stp->ss_sip);
1434 			DLOKACK(wq, mp, DL_DISABMULTI_REQ);
1435 			return;
1436 		}
1437 	DLERRORACK(wq, mp, DL_DISABMULTI_REQ, DL_NOTENAB, 0);
1438 }
1439 
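/*
 * DL_PHYS_ADDR_REQ: return the factory or current physical (IDN
 * ether) address of the attached instance.
 */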
1440 static void
1441 idndl_pareq(queue_t *wq, mblk_t *mp)
1442 {
1443 	struct idnstr		*stp;
1444 	union DL_primitives	*dlp;
1445 	int			type;
1446 	struct idn		*sip;
1447 	struct ether_addr	addr;
1448 
1449 	stp = (struct idnstr *)wq->q_ptr;
1450 
1451 	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
1452 		DLERRORACK(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
1453 		return;
1454 	}
1455 
1456 	dlp  = (union DL_primitives *)mp->b_rptr;
1457 	type = dlp->physaddr_req.dl_addr_type;
1458 	sip  = stp->ss_sip;
1459 
1460 	if (sip == NULL) {
1461 		DLERRORACK(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
1462 		return;
1463 	}
1464 
1465 	switch (type) {
1466 	case DL_FACT_PHYS_ADDR:
1467 		idndl_localetheraddr(sip, &addr);
1468 		break;
1469 
1470 	case DL_CURR_PHYS_ADDR:
1471 		ether_copy(&sip->si_ouraddr, &addr);
1472 		break;
1473 
1474 	default:
1475 		DLERRORACK(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
1476 		return;
1477 	}
1478 
1479 	DLPHYSADDRACK(wq, mp, &addr, ETHERADDRL);
1480 }
1481 
1482 #ifdef notdef
1483 static void
1484 idndl_spareq(queue_t *wq, mblk_t *mp)
1485 {
1486 	struct idnstr		*stp;
1487 	union DL_primitives	*dlp;
1488 	int	off;
1489 	int	len;
1490 	struct ether_addr	*addrp;
1491 	struct idn		*sip;
1492 
1493 	stp = (struct idnstr *)wq->q_ptr;
1494 
1495 	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
1496 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
1497 		return;
1498 	}
1499 
1500 	dlp = (union DL_primitives *)mp->b_rptr;
1501 	len = dlp->set_physaddr_req.dl_addr_length;
1502 	off = dlp->set_physaddr_req.dl_addr_offset;
1503 
1504 	if (!MBLKIN(mp, off, len)) {
1505 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
1506 		return;
1507 	}
1508 
1509 	addrp = (struct ether_addr *)(mp->b_rptr + off);
1510 
1511 	/*
1512 	 * Error if length of address isn't right or the address
1513 	 * specified is a multicast or broadcast address.
1514 	 */
1515 	if ((len != ETHERADDRL) ||
1516 	    IDNDL_ADDR_IS_MULTICAST(addrp) ||
1517 	    (ether_cmp(addrp, &etherbroadcastaddr) == 0)) {
1518 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
1519 		return;
1520 	}
1521 
1522 	/*
1523 	 * Error if this stream is not attached to a device.
1524 	 */
1525 	if ((sip = stp->ss_sip) == NULL) {
1526 		DLERRORACK(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
1527 		return;
1528 	}
1529 
1530 	/*
1531 	 * Set new interface local address and re-init device.
1532 	 * This is destructive to any other streams attached
1533 	 * to this device.
1534 	 */
1535 	ether_copy(addrp, &sip->si_ouraddr);
1536 	(void) idndl_init(stp->ss_sip);
1537 
1538 	DLOKACK(wq, mp, DL_SET_PHYS_ADDR_REQ);
1539 }
1540 #endif /* notdef */
1541 
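/*
 * DL_UNITDATA_REQ: validate the destination address, construct the
 * ether header (in the data mblk's headroom when possible, otherwise
 * by reusing the M_PROTO block) and hand the resulting M_DATA
 * message to idndl_start() for transmission.
 */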
1542 static void
1543 idndl_udreq(queue_t *wq, mblk_t *mp)
1544 {
1545 	struct idnstr			*stp;
1546 	register struct idn		*sip;
1547 	register dl_unitdata_req_t	*dludp;
1548 	mblk_t				*nmp;
1549 	struct idndladdr	*dlap;
1550 	struct ether_header	*headerp;
1551 	t_uscalar_t		off, len;
1552 	t_uscalar_t		sap;
1553 
1554 	stp = (struct idnstr *)wq->q_ptr;
1555 	sip = stp->ss_sip;
1556 
1557 	if (stp->ss_state != DL_IDLE) {
1558 		DLERRORACK(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
1559 		return;
1560 	}
1561 
1562 	dludp = (dl_unitdata_req_t *)mp->b_rptr;
1563 
1564 	off = dludp->dl_dest_addr_offset;
1565 	len = dludp->dl_dest_addr_length;
1566 
1567 	/*
1568 	 * Validate destination address format.
1569 	 */
1570 	if (!MBLKIN(mp, off, len) || (len != IDNADDRL)) {
1571 		dluderrorind(wq, mp, mp->b_rptr + off, len, DL_BADADDR, 0);
1572 		return;
1573 	}
1574 
1575 	/*
1576 	 * Error if no M_DATA follows.
1577 	 */
1578 	nmp = mp->b_cont;
1579 	if (nmp == NULL) {
1580 		dluderrorind(wq, mp, mp->b_rptr + off, len, DL_BADDATA, 0);
1581 		return;
1582 	}
1583 
1584 	dlap = (struct idndladdr *)(mp->b_rptr + off);
1585 
1586 	/*
1587 	 * Create ethernet header by either prepending it onto the
1588 	 * next mblk if possible, or reusing the M_PROTO block if not.
1589 	 */
1590 	if ((DB_REF(nmp) == 1) &&
1591 	    (MBLKHEAD(nmp) >= sizeof (struct ether_header)) &&
1592 	    (((ulong_t)nmp->b_rptr & 0x1) == 0)) {
1593 		nmp->b_rptr -= sizeof (struct ether_header);
1594 		headerp = (struct ether_header *)nmp->b_rptr;
1595 		ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
1596 		ether_copy(&sip->si_ouraddr, &headerp->ether_shost);
1597 		sap = dlap->dl_sap;
1598 		freeb(mp);
1599 		mp = nmp;
1600 	} else {
1601 		DB_TYPE(mp) = M_DATA;
1602 		headerp = (struct ether_header *)mp->b_rptr;
1603 		mp->b_wptr = mp->b_rptr + sizeof (struct ether_header);
1604 		ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
1605 		ether_copy(&sip->si_ouraddr, &headerp->ether_shost);
1606 		sap = dlap->dl_sap;
1607 	}
1608 
1609 	/*
1610 	 * For transmitting, the driver checks whether the sap field of
1611 	 * the DL_BIND_REQ is 0 or the type field is in the range
1612 	 * [0-1500]. If either is true, then the driver
1613 	 * computes the length of the message, not including initial M_PROTO
1614 	 * mblk (message block), of all subsequent DL_UNITDATA_REQ messages and
1615 	 * transmits 802.3 frames that have this value in the MAC frame header
1616 	 * length field.
1617 	 */
1618 	if ((sap <= ETHERMTU) || (stp->ss_sap == 0))
1619 		headerp->ether_type = (msgsize(mp) -
1620 					sizeof (struct ether_header));
1621 	else
1622 		headerp->ether_type = sap;
1623 
1624 	/*
1625 	 * The data transfer code requires only READ access (idn_wput_data).
1626 	 */
1627 	rw_downgrade(&stp->ss_rwlock);
1628 	(void) idndl_start(wq, mp, sip);
1629 }
1630 
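/*
 * Transmit an M_DATA message.  The destination IDN ether address is
 * translated into a target domain (or, for broadcast/multicast, the
 * set of all domains except the local one) and the message is passed
 * to idn_send_data() for each target.  Returns 0 if the message was
 * consumed, 1 if it was dropped or requeued.
 */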
1631 int
1632 idndl_start(queue_t *wq, register mblk_t *mp, register struct idn *sip)
1633 {
1634 	int		rv = 0;
1635 	int		flags;
1636 	int		broadcast = 0;
1637 	int		goagain = 0;
1638 	int		goqueue = 0;
1639 	int		msgcount;
1640 	char		channel;
1641 	mblk_t		*nmp = NULL;
1642 	int		domid;
1643 	domainset_t	domset;
1644 	idn_netaddr_t	netaddr;
1645 	struct idnstr	*stp;
1646 	struct ether_header	*ehp;
1647 	procname_t	proc = "idndl_start";
1648 
1649 	ASSERT(DB_TYPE(mp) == M_DATA);
1650 
1651 	stp = (struct idnstr *)wq->q_ptr;
1652 	ASSERT(sip == stp->ss_sip);
1653 	flags = sip->si_flags;
1654 	channel = (char)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1655 
1656 	ASSERT(RW_READ_HELD(&stp->ss_rwlock));
1657 
1658 	if ((flags & (IDNRUNNING|IDNPROMISC)) != IDNRUNNING) {
1659 		if (!(flags & IDNRUNNING))
1660 			goto requeue;
1661 	}
1662 
1663 	/*
1664 	 * Translate an IDN ethernet address into a domainid
1665 	 * and idnaddr.
1666 	 */
1667 	ehp = (struct ether_header *)mp->b_rptr;
1668 	domid = IDNDL_ETHER2DOMAIN(&ehp->ether_dhost);
1669 
1670 	/*
1671 	 * update MIB II statistics
1672 	 */
1673 	BUMP_OutNUcast(sip, ehp);
1674 
1675 	PR_DLPI("%s: ether %x:%x:%x:%x:%x:%x (domid = %d)\n",
1676 		proc, ehp->ether_dhost.ether_addr_octet[0],
1677 		ehp->ether_dhost.ether_addr_octet[1],
1678 		ehp->ether_dhost.ether_addr_octet[2],
1679 		ehp->ether_dhost.ether_addr_octet[3],
1680 		ehp->ether_dhost.ether_addr_octet[4],
1681 		ehp->ether_dhost.ether_addr_octet[5],
1682 		domid);
1683 
1684 	netaddr.net.chan = channel;
1685 	PR_DLPI("%s: source channel = %d\n", proc, (int)channel);
1686 
1687 	if ((ether_cmp(&ehp->ether_dhost, &etherbroadcastaddr) == 0) ||
1688 			IDNDL_ADDR_IS_MULTICAST(&ehp->ether_dhost)) {
1689 		/*
1690 		 * Caller wants to broadcast!
1691 		 * XXX - Send to everybody but ourself???
1692 		 */
1693 		PR_DLPI("%s: broadcast/multicast requested!!!\n", proc);
1694 		domset = ~DOMAINSET(idn.localid);
1695 		broadcast = 1;
1696 		netaddr.net.netid = IDN_BROADCAST_ALLNETID;
1697 		if ((flags & IDNPROMISC) &&
1698 		    ((nmp = copymsg(mp)) == NULL)) {
1699 			IDN_KSTAT_INC(sip, si_allocbfail);
1700 		}
1701 
1702 	} else if (domid != IDN_NIL_DOMID) {
1703 		domset = DOMAINSET(domid);
1704 		netaddr.net.netid = idn_domain[domid].dnetid;
1705 		if ((flags & IDNPROMISC) &&
1706 		    ((nmp = copymsg(mp)) == NULL)) {
1707 			IDN_KSTAT_INC(sip, si_allocbfail);
1708 		}
1709 	} else {
1710 #ifdef DEBUG
1711 		int	netid;
1712 
1713 		netid = (int)
1714 			ehp->ether_dhost.ether_addr_octet[IDNETHER_NETID];
1715 		PR_DLPI("%s: no domain found for netid 0x%x\n",
1716 			proc, netid);
1717 #endif /* DEBUG */
1718 		goto bad;
1719 	}
1720 
1721 	PR_DLPI("%s: target domainset = 0x%x\n", proc, domset);
1722 
1723 	if ((domset == 0) && (domid == IDN_NIL_DOMID)) {
1724 		PR_DLPI("%s: not connected to any domains!!  Bailing\n",
1725 			proc);
1726 		goto bad;
1727 	}
1728 	/*
1729 	 * XXX - Need to find a better way to handle broadcasting.
1730 	 *	 Should be able to take advantage of the fact that
1731 	 *	 we can broadcast XDC's (xdc_some).  Need to use
1732 	 *	 atomic counter (semaphore) instead of binary
1733 	 *	 "owner" flag, or perhaps domain specific owner bytes.
1734 	 *
1735 	 * Transfer the data.
1736 	 */
1737 	msgcount = 0;
1738 	if (!broadcast)
1739 		goto noloop;
1740 
1741 	for (domid = 0; domid < MAX_DOMAINS; domid++) {
1742 		if (!DOMAIN_IN_SET(domset, domid))
1743 			continue;
1744 
1745 noloop:
1746 
1747 		if (idn_domain[domid].dcpu == IDN_NIL_DCPU) {
1748 			if (broadcast)
1749 				continue;
1750 			else
1751 				break;
1752 		}
1753 
1754 		rv = idn_send_data(domid, netaddr, wq, mp);
1755 
1756 		switch (rv) {
1757 		case IDNXMIT_LOOP:	/* handled in loopback */
1758 			msgcount++;
1759 			break;
1760 
1761 		case IDNXMIT_OKAY:	/* handled, okay to free */
1762 			msgcount++;
1763 			break;
1764 
1765 		case IDNXMIT_RETRY:
1766 			if (!broadcast)
1767 				goto tryagain;
1768 			goagain++;
1769 			break;
1770 
1771 		case IDNXMIT_REQUEUE:
1772 			if (!broadcast)
1773 				goto requeue;
1774 			goqueue++;
1775 			break;
1776 
1777 		default:
1778 			if (!broadcast)
1779 				goto bad;
1780 			break;
1781 		}
1782 		if (!broadcast)
1783 			break;
1784 	}
1785 
1786 	if (msgcount == 0)
1787 		if (goqueue)
1788 			goto requeue;
1789 		else if (goagain)
1790 			goto tryagain;
1791 		else
1792 			goto bad;
1793 
1794 	if ((flags & IDNPROMISC) && nmp)
1795 		idndl_sendup(sip, nmp, idndl_paccept);
1796 
1797 	freemsg(mp);
1798 
1799 	PR_DLPI("%s: successful transmit to domainset 0x%x.\n",
1800 		proc, domset);
1801 
1802 	return (0);
1803 
1804 bad:
1805 	PR_DLPI("%s: bad transmission to domainset 0x%x, dropping msg.\n",
1806 		proc, domset);
1807 	if (nmp)
1808 		freemsg(nmp);
1809 	freemsg(mp);
1810 	qenable(wq);
1811 	return (1);
1812 
1813 requeue:
1814 	PR_DLPI("%s: requeue for domainset 0x%x, no qenable\n",
1815 		proc, domset);
1816 	if (nmp)
1817 		freemsg(nmp);
1818 	if (putbq(wq, mp) == 0)
1819 		freemsg(mp);
1820 	return (1);
1821 
1822 tryagain:
1823 	PR_DLPI("%s: try again to domainset 0x%x, putbq.\n",
1824 		proc, domset);
1825 	if (nmp)
1826 		freemsg(nmp);
1827 	if (putbq(wq, mp) == 0)
1828 		freemsg(mp);
1829 	qenable(wq);
1830 	return (1);
1831 }
1832 
1833 /*
1834  * Called by:	idnh_recv_data, idn_recv_mboxdata.
1835  */
1836 void
1837 idndl_read(struct idn *sip, mblk_t *mp)
1838 {
1839 	struct ether_header	*ehp;
1840 	queue_t			*ip4q;
1841 	queue_t			*ip6q;
1842 	int		pktlen;
1843 	procname_t	proc = "idndl_read";
1844 
1845 	PR_DLPI("%s: incoming msgsize = %lu, msgdsize = %lu\n",
1846 		proc, msgsize(mp), msgdsize(mp));
1847 
1848 	ehp = (struct ether_header *)mp->b_rptr;
1849 	if (sip == NULL)
1850 		sip = IDNDL_ETHER2SIP(&ehp->ether_dhost);
1851 	if (sip == NULL) {
1852 		/*
1853 		 * If the sip is NULL, then I don't have a connection
1854 		 * for this network.  No point in sending the message
1855 		 * up.
1856 		 */
1857 		PR_DLPI("%s: no plumbing to send message through.\n",
1858 			proc);
1859 		freemsg(mp);
1860 		return;
1861 	}
1862 	IDN_KSTAT_INC(sip, si_ipackets);
1863 	IDN_KSTAT_INC(sip, si_ipackets64);
1864 	/*
1865 	 * update MIB II statistics
1866 	 */
1867 	pktlen = mp->b_wptr - mp->b_rptr;
1868 	BUMP_InNUcast(sip, ehp);
1869 	IDN_KSTAT_ADD(sip, si_rcvbytes, pktlen);
1870 	IDN_KSTAT_ADD(sip, si_rbytes64, (uint64_t)pktlen);
1871 
1872 	ip4q = sip->si_ip4q;
1873 	ip6q = sip->si_ip6q;
1874 
1875 	if (IS_ETHERTYPE_IPV4(ehp->ether_type) &&
1876 			!IDNDL_ADDR_IS_MULTICAST(&ehp->ether_dhost) &&
1877 			ip4q &&
1878 			canputnext(ip4q)) {
1879 		mp->b_rptr += sizeof (struct ether_header);
1880 		(void) putnext(ip4q, mp);
1881 		/*LINTED*/
1882 	} else if (IS_ETHERTYPE_IPV6(ehp->ether_type) &&
1883 			!IDNDL_ADDR_IS_MULTICAST(&ehp->ether_dhost) &&
1884 			ip6q &&
1885 			canputnext(ip6q)) {
1886 		mp->b_rptr += sizeof (struct ether_header);
1887 		(void) putnext(ip6q, mp);
1888 	} else {
1889 		/*
1890 		 * Strip the PADs for 802.3
1891 		 */
1892 		pktlen = ehp->ether_type + sizeof (struct ether_header);
1893 		PR_DLPI("%s: stripping PADs for 802.3 (pktlen=%d)\n",
1894 			proc, pktlen);
1895 		if (pktlen < ETHERMIN)
1896 			mp->b_wptr = mp->b_rptr + pktlen;
1897 		idndl_sendup(sip, mp, idndl_accept);
1898 	}
1899 }
1900 
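/*
 * (Re)initialize an instance: resume it if suspended, recompute the
 * IDNPROMISC flag from the attached streams, mark it IDNRUNNING and
 * call idndl_wenable().  Returns 0 on success.
 */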
1901 int
1902 idndl_init(struct idn *sip)
1903 {
1904 	struct idnstr	*stp;
1905 
1906 	if (sip->si_flags & IDNSUSPENDED)
1907 		(void) ddi_dev_is_needed(sip->si_dip, 0, 1);
1908 
1909 	sip->si_flags = 0;
1910 	sip->si_wantw = 0;
1911 
1912 	IDN_KSTAT_INC(sip, si_inits);
1913 
1914 	rw_enter(&idn.struprwlock, RW_WRITER);
1915 
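	/*
	 * If any stream bound to this interface has all-physical
	 * (promiscuous) mode enabled, mark the interface promiscuous.
	 */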
1916 	for (stp = idn.strup; stp; stp = stp->ss_nextp) {
1917 		if ((stp->ss_sip == sip) && (stp->ss_flags & IDNSALLPHYS)) {
1918 			sip->si_flags |= IDNPROMISC;
1919 			break;
1920 		}
1921 	}
1922 
1923 	sip->si_flags |= IDNRUNNING;
1924 
1925 	mutex_enter(&idn.sipwenlock);
1926 	idndl_wenable(sip);
1927 	mutex_exit(&idn.sipwenlock);
1928 
1929 	rw_exit(&idn.struprwlock);
1930 
1931 	return (!(sip->si_flags & IDNRUNNING));
1932 }
1933 
1934 void
1935 idndl_uninit(struct idn *sip)
1936 {
1937 	int		channel;
1938 	procname_t	proc = "idndl_uninit";
1939 
1940 	sip->si_flags &= ~IDNRUNNING;
1941 
1942 	channel = (int)sip->si_ouraddr.ether_addr_octet[IDNETHER_CHANNEL];
1943 	PR_DLPI("%s: IP SAP, uninit channel %d\n", proc, channel);
1944 	/*
1945 	 * An uninit is a hard close of an interface.
1946 	 */
1947 	idn_close_channel(channel, IDNCHAN_HARD_CLOSE);
1948 }
1949 
1950 /*
1951  * Send packet upstream.
1952  * Assume mp->b_rptr points to ether_header.
1953  */
1954 void
1955 idndl_sendup(struct idn *sip, mblk_t *mp, struct idnstr *(*acceptfunc)())
1956 {
1957 	int			type;
1958 	struct ether_addr	*dhostp, *shostp;
1959 	struct idnstr		*stp, *nstp;
1960 	mblk_t 		*nmp;
1961 	ulong_t		isgroupaddr;
1962 
1963 	TRACE_0(TR_FAC_IDN, TR_IDN_SENDUP_START, "idnsendup start");
1964 
1965 	dhostp = &((struct ether_header *)mp->b_rptr)->ether_dhost;
1966 	shostp = &((struct ether_header *)mp->b_rptr)->ether_shost;
1967 	type = ((struct ether_header *)mp->b_rptr)->ether_type;
1968 
1969 	isgroupaddr = IDNDL_ADDR_IS_MULTICAST(dhostp);
1970 
1971 	/*
1972 	 * While holding a reader lock on the linked list of streams structures,
1973 	 * attempt to match the address criteria for each stream
1974 	 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
1975 	 */
1976 
1977 	rw_enter(&idn.struprwlock, RW_READER);
1978 
1979 	if ((stp = (*acceptfunc)(idn.strup, sip, type, dhostp)) == NULL) {
1980 		rw_exit(&idn.struprwlock);
1981 		freemsg(mp);
1982 		TRACE_0(TR_FAC_IDN, TR_IDN_SENDUP_END, "idnsendup end");
1983 		return;
1984 	}
1985 
1986 	/*
1987 	 * Loop on matching open streams until (*acceptfunc)() returns NULL.
1988 	 */
1989 	for (; nstp = (*acceptfunc)(stp->ss_nextp, sip, type, dhostp);
1990 		stp = nstp) {
1991 
1992 		if (canputnext(stp->ss_rq) == 0) {
1993 			IDN_KSTAT_INC(sip, si_nocanput);
1994 			continue;
1995 		}
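		/*
		 * dupmsg() shares the data blocks with the original message;
		 * fall back to copymsg() if it cannot duplicate the chain.
		 */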
1996 		if ((nmp = dupmsg(mp)) == NULL)
1997 			nmp = copymsg(mp);
1998 		if (nmp) {
1999 			if ((stp->ss_flags & IDNSFAST) && !isgroupaddr) {
2000 				nmp->b_rptr += sizeof (struct ether_header);
2001 				(void) putnext(stp->ss_rq, nmp);
2002 			} else if (stp->ss_flags & IDNSRAW) {
2003 				(void) putnext(stp->ss_rq, nmp);
2004 			} else if ((nmp = idndl_addudind(sip, nmp, shostp,
2005 						dhostp, type, isgroupaddr))) {
2006 				(void) putnext(stp->ss_rq, nmp);
2007 			}
2008 		} else {
2009 			IDN_KSTAT_INC(sip, si_allocbfail);
2010 		}
2011 	}
2012 
2013 
2014 	/*
2015 	 * Do the last one.
2016 	 * Do the last one with the original message, so no copy is needed.
2017 	if (canputnext(stp->ss_rq)) {
2018 		if ((stp->ss_flags & IDNSFAST) && !isgroupaddr) {
2019 			mp->b_rptr += sizeof (struct ether_header);
2020 			(void) putnext(stp->ss_rq, mp);
2021 		} else if (stp->ss_flags & IDNSRAW) {
2022 			(void) putnext(stp->ss_rq, mp);
2023 		} else if ((mp = idndl_addudind(sip, mp, shostp, dhostp,
2024 					    type, isgroupaddr))) {
2025 			(void) putnext(stp->ss_rq, mp);
2026 		}
2027 	} else {
2028 		freemsg(mp);
2029 		IDN_KSTAT_INC(sip, si_nocanput);
2030 		IDN_KSTAT_INC(sip, si_norcvbuf);	/* MIB II */
2031 	}
2032 
2033 	rw_exit(&idn.struprwlock);
2034 	TRACE_0(TR_FAC_IDN, TR_IDN_SENDUP_END, "idnsendup end");
2035 }
2036 
2037 /*
2038  * Test upstream destination sap and address match.
2039  */
2040 struct idnstr *
2041 idndl_accept(register struct idnstr *stp, register struct idn *sip,
2042 	    int type, struct ether_addr *addrp)
2043 {
2044 	t_uscalar_t	sap;
2045 	uint_t		flags;
2046 
2047 	for (; stp; stp = stp->ss_nextp) {
2048 		sap   = stp->ss_sap;
2049 		flags = stp->ss_flags;
2050 
2051 		if ((stp->ss_sip == sip) && IDNSAPMATCH(sap, type, flags))
2052 			if ((ether_cmp(addrp, &sip->si_ouraddr) == 0) ||
2053 			    (ether_cmp(addrp, &etherbroadcastaddr) == 0) ||
2054 			    (flags & IDNSALLPHYS) ||
2055 			    idndl_mcmatch(stp, addrp))
2056 				return (stp);
2057 	}
2058 
2059 	return (NULL);
2060 }
2061 
2062 /*
2063  * Test upstream destination sap and address match for IDNSALLPHYS only.
2064  */
2065 /* ARGSUSED3 */
2066 struct idnstr *
2067 idndl_paccept(register struct idnstr *stp, register struct idn *sip,
2068 	    int type, struct ether_addr *addrp)
2069 {
2070 	t_uscalar_t	sap;
2071 	uint_t		flags;
2072 
2073 	for (; stp; stp = stp->ss_nextp) {
2074 		sap   = stp->ss_sap;
2075 		flags = stp->ss_flags;
2076 
2077 		if ((stp->ss_sip == sip) &&
2078 		    IDNSAPMATCH(sap, type, flags) &&
2079 		    (flags & IDNSALLPHYS))
2080 			return (stp);
2081 	}
2082 
2083 	return (NULL);
2084 }
2085 
2086 /*
2087  * Set or clear the device ipq pointer.
2088  * Assumes the IPv4 and IPv6 streams are IDNSFAST (fastpath).
2089  */
2090 static void
2091 idndl_setipq(struct idn *sip)
2092 {
2093 	struct idnstr	*stp;
2094 	int		ok4 = 1;
2095 	int		ok6 = 1;
2096 	queue_t		*ip4q = NULL;
2097 	queue_t		*ip6q = NULL;
2098 
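	/*
	 * The ipq shortcut is usable only when exactly one stream is bound
	 * to the IP SAP and no stream on this interface is promiscuous or
	 * bound to all SAPs; otherwise idndl_read() takes the slow path.
	 */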
2099 	rw_enter(&idn.struprwlock, RW_READER);
2100 
2101 	for (stp = idn.strup; stp; stp = stp->ss_nextp) {
2102 		if (stp->ss_sip == sip) {
2103 			if (stp->ss_flags & (IDNSALLPHYS|IDNSALLSAP)) {
2104 				ok4 = 0;
2105 				ok6 = 0;
2106 				break;
2107 			}
2108 			if (IS_ETHERTYPE_IPV4(stp->ss_sap)) {
2109 				if (ip4q == NULL)
2110 					ip4q = stp->ss_rq;
2111 				else
2112 					ok4 = 0;
2113 				/*LINTED*/
2114 			} else if (IS_ETHERTYPE_IPV6(stp->ss_sap)) {
2115 				if (ip6q == NULL)
2116 					ip6q = stp->ss_rq;
2117 				else
2118 					ok6 = 0;
2119 			}
2120 		}
2121 	}
2122 
2123 	rw_exit(&idn.struprwlock);
2124 
2125 	if (ok4)
2126 		sip->si_ip4q = ip4q;
2127 	else
2128 		sip->si_ip4q = NULL;
2129 	if (ok6)
2130 		sip->si_ip6q = ip6q;
2131 	else
2132 		sip->si_ip6q = NULL;
2133 }
2134 
2135 /*
2136  * Prefix msg with a DL_UNITDATA_IND mblk and return the new msg.
2137  */
2138 static mblk_t *
2139 idndl_addudind(struct idn *sip, mblk_t *mp,
2140 	    struct ether_addr *shostp, struct ether_addr *dhostp,
2141 	    int type, ulong_t isgroupaddr)
2142 {
2143 	dl_unitdata_ind_t	*dludindp;
2144 	struct idndladdr	*dlap;
2145 	mblk_t	*nmp;
2146 	int	size;
2147 
2148 	TRACE_0(TR_FAC_IDN, TR_IDN_ADDUDIND_START, "idndl_addudind start");
2149 
2150 	mp->b_rptr += sizeof (struct ether_header);
2151 
2152 	/*
2153 	 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
2154 	 */
2155 	size = sizeof (dl_unitdata_ind_t) + IDNADDRL + IDNADDRL;
2156 	nmp = allocb(IDNROUNDUP(IDNHEADROOM + size, sizeof (double)), BPRI_LO);
2157 	if (nmp == NULL) {
2158 		IDN_KSTAT_INC(sip, si_allocbfail);
2159 		IDN_KSTAT_INC(sip, si_ierrors);
2160 		if (idn_debug)
2161 			serror(sip->si_dip, 451, "allocb failed");
2162 		freemsg(mp);
2163 		TRACE_0(TR_FAC_IDN, TR_IDN_ADDUDIND_END, "idndl_addudind end");
2164 		return (NULL);
2165 	}
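	/*
	 * Lay the primitive down at the tail of the new data block so that
	 * the (rounded-up) IDNHEADROOM bytes remain as headroom in front.
	 */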
2166 	DB_TYPE(nmp) = M_PROTO;
2167 	nmp->b_wptr = nmp->b_datap->db_lim;
2168 	nmp->b_rptr = nmp->b_wptr - size;
2169 
2170 	/*
2171 	 * Construct a DL_UNITDATA_IND primitive.
2172 	 */
2173 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2174 	dludindp->dl_primitive = DL_UNITDATA_IND;
2175 	dludindp->dl_dest_addr_length = IDNADDRL;
2176 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2177 	dludindp->dl_src_addr_length = IDNADDRL;
2178 	dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + IDNADDRL;
2179 	dludindp->dl_group_address = isgroupaddr;
2180 
2181 	dlap = (struct idndladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
2182 	ether_copy(dhostp, &dlap->dl_phys);
2183 	dlap->dl_sap = (ushort_t)type;
2184 
2185 	dlap = (struct idndladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
2186 					+ IDNADDRL);
2187 	ether_copy(shostp, &dlap->dl_phys);
2188 	dlap->dl_sap = (ushort_t)type;
2189 
2190 	/*
2191 	 * Link the M_PROTO and M_DATA together.
2192 	 */
2193 	nmp->b_cont = mp;
2194 	TRACE_0(TR_FAC_IDN, TR_IDN_ADDUDIND_END, "idndl_addudind end");
2195 	return (nmp);
2196 }
2197 
2198 /*
2199  * Return TRUE if the given multicast address is one
2200  * of those that this particular Stream is interested in.
2201  */
2202 static int
2203 idndl_mcmatch(register struct idnstr *stp, register struct ether_addr *addrp)
2204 {
2205 	register struct ether_addr	*mctab;
2206 	register int	mccount;
2207 	register int	i;
2208 
2209 	/*
2210 	 * Return FALSE if not a multicast address.
2211 	 */
2212 	if (!IDNDL_ADDR_IS_MULTICAST(addrp))
2213 		return (0);
2214 
2215 	/*
2216 	 * Check if all multicasts have been enabled for this Stream
2217 	 */
2218 	if (stp->ss_flags & IDNSALLMULTI)
2219 		return (1);
2220 
2221 	/*
2222 	 * Return FALSE if no multicast addresses enabled for this Stream.
2223 	 */
2224 	if (stp->ss_mccount == 0)
2225 		return (0);
2226 
2227 	/*
2228 	 * Otherwise, find it in the table.
2229 	 */
2230 
2231 	mccount = stp->ss_mccount;
2232 	mctab = stp->ss_mctab;
2233 
2234 	for (i = 0; i < mccount; i++)
2235 		if (!ether_cmp(addrp, &mctab[i]))
2236 			return (1);
2237 
2238 	return (0);
2239 }
2240 
2241 /*
2242  * Start xmit on any msgs previously enqueued on any write queues.
2243  * If the caller passes NULL, then we need to check all
2244  * our interfaces.
2245  */
2246 void
2247 idndl_wenable(struct idn *sip)
2248 {
2249 	struct idnstr	*stp;
2250 	queue_t		*wq;
2251 
2252 	/*
2253 	 * Order of wantw accesses is important.
2254 	 */
2255 	ASSERT((sip == NULL) ? RW_LOCK_HELD(&idn.struprwlock) : 1);
2256 	ASSERT(MUTEX_HELD(&idn.sipwenlock));
2257 
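	/*
	 * si_wantw is presumably set by a transmitter that found its queue
	 * blocked; keep rescanning until it stays clear so no wakeup is lost.
	 */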
2258 	do {
2259 		if (sip)
2260 			sip->si_wantw = 0;
2261 		for (stp = idn.strup; stp; stp = stp->ss_nextp) {
2262 			if ((!sip || (stp->ss_sip == sip)) &&
2263 			    stp->ss_rq && ((wq = WR(stp->ss_rq))->q_first))
2264 				qenable(wq);
2265 		}
2266 	} while (sip && sip->si_wantw);
2267 }
2268 
2269 /*VARARGS*/
2270 static void
2271 serror(dev_info_t *dip, int idnerr, char *fmt, ...)
2272 {
2273 	static	long	last;
2274 	static	char	*lastfmt;
2275 	char		msg_buffer[255];
2276 	va_list ap;
2277 	time_t	now;
2278 
2279 	/*
2280 	 * Don't print the same error message more than once every two seconds.
2281 	 */
2282 	now = gethrestime_sec();
2283 	if ((last == (now & ~1)) && (lastfmt == fmt))
2284 		return;
2285 
2286 	last = now & ~1;
2287 	lastfmt = fmt;
2288 
2289 	va_start(ap, fmt);
2290 	(void) vsprintf(msg_buffer, fmt, ap);
2291 	cmn_err(CE_CONT, "IDN: %d: %s%d: %s\n",
2292 		idnerr, ddi_get_name(dip),
2293 		ddi_get_instance(dip), msg_buffer);
2294 	va_end(ap);
2295 }
2296