xref: /titanic_41/usr/src/uts/sun4u/starfire/io/idn_proto.c (revision 45916cd2fec6e79bca5dee0421bd39e3c2910d1e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Inter-Domain Network
31  *
32  * IDN Protocol functions to support domain link/unlink/reconfig.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/machparam.h>
38 #include <sys/debug.h>
39 #include <sys/cpuvar.h>
40 #include <sys/kmem.h>
41 #include <sys/mutex.h>
42 #include <sys/rwlock.h>
43 #include <sys/systm.h>
44 #include <sys/stream.h>
45 #include <sys/strsun.h>
46 #include <sys/stropts.h>
47 #include <sys/sema_impl.h>
48 #include <sys/membar.h>
49 #include <sys/utsname.h>
50 #include <inet/common.h>
51 #include <inet/mi.h>
52 #include <netinet/ip6.h>
53 #include <inet/ip.h>
54 #include <netinet/in.h>
55 #include <sys/vm_machparam.h>
56 #include <sys/x_call.h>
57 #include <sys/ddi.h>
58 #include <sys/sunddi.h>
59 #include <sys/atomic.h>
60 #include <vm/as.h>		/* kas decl */
61 
62 #include <sys/idn.h>
63 #include <sys/idn_xf.h>
64 
65 #define	IDNBUG_CPUPERBOARD
66 
67 extern pri_t		maxclsyspri;
68 extern u_longlong_t	gettick();
69 
70 clock_t	idn_xmit_monitor_freq = 50;
71 
72 static int	idn_connect(int domid);
73 static int	idn_disconnect(int domid, idn_fin_t fintype,
74 				idn_finarg_t finarg, idn_finsync_t finsync);
75 static void	idn_deconfig(int domid);
76 static void	idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
77 				idn_finarg_t finarg, idn_finopt_t finopt,
78 				boardset_t idnset);
79 static void	idn_retry_execute(void *arg);
80 static void	idn_retry_submit(void (*func)(uint_t token, void *arg),
81 				void *arg, uint_t token, clock_t ticks);
82 static void	idn_shutdown_datapath(domainset_t domset, int force);
83 static mblk_t	*idn_fill_buffer(caddr_t bufp, int size, mblk_t *mp,
84 				uchar_t **data_rptrp);
85 static ushort_t	idn_cksum(register ushort_t *hdrp, register int count);
86 static int	idn_mark_awol(int domid, clock_t *atime);
87 
88 static void	idn_recv_proto(idn_protomsg_t *hp);
89 static void	idn_send_config(int domid, int phase);
90 static void	idn_recv_config(int domid, idn_msgtype_t *mtp,
91 				idn_xdcargs_t xargs);
92 static int	idn_send_master_config(int domid, int phase);
93 static int	idn_send_slave_config(int domid, int phase);
94 static uint_t	idn_check_master_config(int domid, uint_t *exp, uint_t *act);
95 static uint_t	idn_check_slave_config(int domid, uint_t *exp, uint_t *act);
96 static int	idn_recv_config_done(int domid);
97 static void	idn_nego_cleanup_check(int domid, int new_masterid,
98 				int new_cpuid);
99 static void	idn_recv_cmd(int domid, idn_msgtype_t *mtp,
100 				idn_xdcargs_t xargs);
101 static int	idn_recv_data(int domid, idn_msgtype_t *mtp,
102 				idn_xdcargs_t xargs);
103 static int	idn_send_data_loopback(idn_netaddr_t dst_netaddr,
104 				queue_t *wq, mblk_t *mp);
105 static void	idn_send_dataresp(int domid, idn_nack_t nacktype);
106 static int	idn_send_mboxdata(int domid, struct idn *sip, int channel,
107 				caddr_t bufp);
108 static int	idn_recv_mboxdata(int channel, caddr_t bufp);
109 static int	idn_program_hardware(int domid);
110 static int	idn_deprogram_hardware(int domid);
111 
112 static void	idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp,
113 			idn_cmd_t cmdtype, idn_nack_t nacktype);
114 static void	idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1,
115 				uint_t arg2, uint_t arg3);
116 static void	idn_terminate_cmd(int domid, int serrno);
117 static void	idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls);
118 static void	idn_mainmbox_activate(int domid);
119 static void	idn_mainmbox_deactivate(ushort_t domset);
120 static void	idn_mainmbox_chan_register(int domid,
121 				idn_mainmbox_t *send_mmp,
122 				idn_mainmbox_t *recv_mmp, int channel);
123 static int	idn_mainmbox_chan_unregister(ushort_t domset, int channel);
124 static int	idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp);
125 static void	idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp);
126 static int	idn_activate_channel(idn_chanset_t chanset,
127 				idn_chanop_t chanop);
128 static void	idn_deactivate_channel(idn_chanset_t chanset,
129 				idn_chanop_t chanop);
130 static int	idn_deactivate_channel_services(int channel,
131 				idn_chanop_t chanop);
132 static int	idn_activate_channel_services(int channel);
133 static void	idn_chan_server(idn_chansvr_t **cspp);
134 #if 0
135 static void	idn_chan_flush(idn_chansvr_t *csp);
136 #endif /* 0 */
137 static void	idn_chan_action(int channel, idn_chanaction_t chanaction,
138 				int wait);
139 static void	idn_chan_addmbox(int channel, ushort_t domset);
140 static void	idn_chan_delmbox(int channel, ushort_t domset);
141 static void	idn_submit_chanactivate_job(int channel);
142 static void	idn_exec_chanactivate(void *chn);
143 
144 static void	idn_link_established(void *arg);
145 static void	idn_prealloc_slab(int nslabs);
146 static void	idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp,
147 				uint_t slab_size);
148 static void	idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp,
149 				uint_t slab_offset, uint_t slab_size,
150 				int serrno);
151 static void	idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
152 				uint_t slab_size, int serrno);
153 static void	idn_recv_slabreap_req(int domid, idn_msgtype_t *mtp,
154 				int nslabs);
155 static void	idn_recv_slabreap_resp(int domid, int nslabs, int serrno);
156 static void	idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp,
157 				int nslabs, int serrno);
158 static void	idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
159 				smr_offset_t slab_offset, uint_t slab_size);
160 static void	idn_recv_slabfree_resp(int domid, uint_t slab_offset,
161 				uint_t slab_size, int serrno);
162 static void	idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp,
163 				uint_t slab_offset, uint_t slab_size,
164 				int serrno);
165 static void	idn_retry_nodename_req(void *arg);
166 static void	idn_send_nodename_req(int domid);
167 static void	idn_send_nodename_resp(int domid, idn_msgtype_t *mtp,
168 				uint_t bufoffset, int serrno);
169 static void	idn_recv_nodename_req(int domid, idn_msgtype_t *mtp,
170 				uint_t bufoffset);
171 static void	idn_recv_nodename_resp(int domid, uint_t bufoffset,
172 				int serrno);
173 
174 static void	idn_protocol_server(int *id);
175 static void	idn_protocol_server_killall();
176 static void	idn_protojob_free(idn_protojob_t *jp);
177 
178 static int	idn_xstate_transfunc(int domid, void *transarg);
179 static int	idn_xphase_transition(int domid, idn_msgtype_t *mtp,
180 				idn_xdcargs_t xargs);
181 static void	idn_sync_enter(int domid, idn_synccmd_t cmd,
182 				domainset_t xset, domainset_t rset,
183 				int (*transfunc)(), void *transarg);
184 static domainset_t
185 		idn_sync_register(int domid, idn_synccmd_t cmd,
186 				domainset_t ready_set, idn_syncreg_t regtype);
187 static void	idn_sync_register_awol(int domid);
188 static int	idn_verify_config_mbox(int domid);
189 static int	idn_select_master(int domid, int rmasterid, int rcpuid);
190 
191 static int	valid_mtu(uint_t mtu);
192 static int	valid_bufsize(uint_t bufsize);
193 static int	valid_slabsize(int slabsize);
194 static int	valid_nwrsize(int nwrsize);
195 
196 static int	idn_master_init();
197 static void	idn_master_deinit();
198 
199 static void	idn_send_acknack(int domid, idn_msgtype_t *mtp,
200 				idn_xdcargs_t xargs);
201 
202 static int	idn_send_nego(int domid, idn_msgtype_t *mtp,
203 				domainset_t conset);
204 static void	idn_retry_nego(uint_t token, void *arg);
205 static int	idn_check_nego(int domid, idn_msgtype_t *mtp,
206 				idn_xdcargs_t xargs);
207 static void	idn_action_nego_pend(int domid, idn_msgtype_t *mtp,
208 				idn_xdcargs_t xargs);
209 static void	idn_error_nego(int domid, idn_msgtype_t *mtp,
210 				idn_xdcargs_t xargs);
211 static void	idn_action_nego_sent(int domid, idn_msgtype_t *mtp,
212 				idn_xdcargs_t xargs);
213 static void	idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp,
214 				idn_xdcargs_t xargs);
215 static void	idn_final_nego(int domid);
216 static void	idn_exit_nego(int domid, uint_t msgtype);
217 
218 static int	idn_send_con(int domid, idn_msgtype_t *mtp,
219 				idn_con_t contype, domainset_t conset);
220 static void	idn_retry_con(uint_t token, void *arg);
221 static int	idn_check_con(int domid, idn_msgtype_t *mtp,
222 				idn_xdcargs_t xargs);
223 static void	idn_action_con_pend(int domid, idn_msgtype_t *mtp,
224 				idn_xdcargs_t xargs);
225 static void	idn_error_con(int domid, idn_msgtype_t *mtp,
226 				idn_xdcargs_t xargs);
227 static void	idn_action_con_sent(int domid, idn_msgtype_t *mtp,
228 				idn_xdcargs_t xargs);
229 static void	idn_action_con_rcvd(int domid, idn_msgtype_t *mtp,
230 				idn_xdcargs_t xargs);
231 static void	idn_final_con(int domid);
232 static void	idn_exit_con(int domid, uint_t msgtype);
233 
234 static int	idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype,
235 				idn_finarg_t finarg, idn_finopt_t finopt,
236 				domainset_t finset, uint_t finmaster);
237 static void	idn_retry_fin(uint_t token, void *arg);
238 static int	idn_check_fin_pend(int domid, idn_msgtype_t *mtp,
239 				idn_xdcargs_t xargs);
240 static void	idn_action_fin_pend(int domid, idn_msgtype_t *mtp,
241 				idn_xdcargs_t xargs);
242 static void	idn_error_fin_pend(int domid, idn_msgtype_t *mtp,
243 				idn_xdcargs_t xargs);
244 static int	idn_check_fin_sent(int domid, idn_msgtype_t *mtp,
245 				idn_xdcargs_t xargs);
246 static void	idn_action_fin_sent(int domid, idn_msgtype_t *mtp,
247 				idn_xdcargs_t xargs);
248 static void	idn_error_fin_sent(int domid, idn_msgtype_t *mtp,
249 				idn_xdcargs_t xargs);
250 static void	idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp,
251 				idn_xdcargs_t xargs);
252 static void	idn_final_fin(int domid);
253 static void	idn_exit_fin(int domid, uint_t msgtype);
254 
255 /*
256  * We keep a small cache of protojob structures just
257  * in case allocation within idn_handler comes back
258  * with nothing from the land of kmem.
259  */
260 idn_protojob_t	idn_protojob_cache[IDN_DMV_PENDING_MAX];
261 idn_protojob_t	*idn_protojob_cache_list;
262 kmutex_t	idn_protojob_cache_lock;
263 
264 /*
265  *	- receive message.
266  *	- call check-function for current state.
267  *	- if (check-function == ok) then
268  *		call action-function for current state.
269  *	  else
270  *		call error-function for current state.
271  *	- transition state based on check results.
272  *	- if (next state == final state) then
273  *		call final-function.
274  */
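/*
 * Each xphase table below supplies, for one message type, the
 * per-phase (pending, sent, received, plus the state entered on
 * completion) check/action/error handlers, along with the final
 * and exit functions, that drive the flow described above through
 * idn_xphase_transition().
 */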
275 static idn_xphase_t xphase_nego = {
276 	IDNP_NEGO,
277 	{
278 		{ IDNDS_NEGO_PEND,
279 			idn_check_nego,
280 			idn_action_nego_pend,
281 			idn_error_nego},
282 		{ IDNDS_NEGO_SENT,
283 			idn_check_nego,
284 			idn_action_nego_sent,
285 			idn_error_nego},
286 		{ IDNDS_NEGO_RCVD,
287 			NULL,
288 			idn_action_nego_rcvd,
289 			NULL },
290 		{ IDNDS_CONFIG, NULL, NULL, NULL },
291 	},
292 	idn_final_nego,
293 	idn_exit_nego
294 };
295 
296 static idn_xphase_t xphase_con = {
297 	IDNP_CON,
298 	{
299 		{ IDNDS_CON_PEND,
300 			idn_check_con,
301 			idn_action_con_pend,
302 			idn_error_con},
303 		{ IDNDS_CON_SENT,
304 			idn_check_con,
305 			idn_action_con_sent,
306 			idn_error_con},
307 		{ IDNDS_CON_RCVD,
308 			NULL,
309 			idn_action_con_rcvd,
310 			NULL },
311 		{ IDNDS_CON_READY, NULL, NULL, NULL },
312 	},
313 	idn_final_con,
314 	idn_exit_con
315 };
316 
317 static idn_xphase_t xphase_fin = {
318 	IDNP_FIN,
319 	{
320 		{ IDNDS_FIN_PEND,
321 			idn_check_fin_pend,
322 			idn_action_fin_pend,
323 			idn_error_fin_pend },
324 		{ IDNDS_FIN_SENT,
325 			idn_check_fin_sent,
326 			idn_action_fin_sent,
327 			idn_error_fin_sent },
328 		{ IDNDS_FIN_RCVD,
329 			NULL,
330 			idn_action_fin_rcvd,
331 			NULL },
332 		{ IDNDS_DMAP, NULL, NULL, NULL },
333 	},
334 	idn_final_fin,
335 	idn_exit_fin
336 };
337 
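/*
 * Transfer-state transition table consulted by idn_next_xstate().
 * The first index is the current xstate, the second is the incoming
 * event (none, msg, msg+ack, ack, nack - matching the column comments),
 * and the final pair is { next state on success, next state on error }.
 */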
338 static int idnxs_state_table[4][5][2] = {
339 	{			/* IDNXS_PEND */
340 		{ IDNXS_SENT,	IDNXS_PEND },	/* 0 */
341 		{ IDNXS_RCVD,	IDNXS_PEND },	/* msg */
342 		{ IDNXS_NIL,	IDNXS_PEND },	/* msg+ack */
343 		{ IDNXS_PEND,	IDNXS_NIL },	/* ack */
344 		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
345 	},
346 	{			/* IDNXS_SENT */
347 		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
348 		{ IDNXS_RCVD,	IDNXS_PEND },	/* msg */
349 		{ IDNXS_FINAL,	IDNXS_PEND },	/* msg+ack */
350 		{ IDNXS_NIL,	IDNXS_NIL },	/* ack */
351 		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
352 	},
353 	{			/* IDNXS_RCVD */
354 		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
355 		{ IDNXS_NIL,	IDNXS_NIL },	/* msg */
356 		{ IDNXS_FINAL,	IDNXS_NIL },	/* msg+ack */
357 		{ IDNXS_FINAL,	IDNXS_NIL },	/* ack */
358 		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
359 	},
360 	{			/* IDNXS_FINAL */
361 		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
362 		{ IDNXS_NIL,	IDNXS_NIL },	/* msg */
363 		{ IDNXS_NIL,	IDNXS_NIL },	/* msg+ack */
364 		{ IDNXS_NIL,	IDNXS_NIL },	/* ack */
365 		{ IDNXS_NIL,	IDNXS_NIL },	/* nack */
366 	}
367 };
368 
369 /*
370  * NONE		Respective domain does not have a master.
371  * OTHER	Respective domain has a master different
372  *		than either local or remote.
373  * LOCAL	Respective domain has chosen local as master.
374  * REMOTE	Respective domain has chosen remote as master.
375  *
376  * Actions:
377  *	VOTE		Compare votes and select one.
378  *	VOTE_RCFG	Compare votes and Reconfigure
379  *			if necessary, i.e. remote won.
380  *	CONNECT		Connect to remote's OTHER if different
381  *			than our local master.
382  *	LOCAL		Local domain is winner.
383  *	REMOTE		Remote domain is winner.
384  *	WAIT		Wait for remote to connect to our
385  *			master if his is different.
386  *	ERROR		An impossible condition.
387  *
388  * Index:
389  *	0 = Local
390  *	1 = Remote
391  */
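/*
 * For example, if the local domain has no master (NONE) and the
 * remote domain reports the local domain as its master (LOCAL),
 * the entry is MASTER_SELECT_LOCAL and the local domain wins.
 */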
392 static idn_master_select_t master_select_table[4][4] = {
393 	{				/* local	remote	*/
394 		MASTER_SELECT_VOTE,	/* NONE		NONE	*/
395 		MASTER_SELECT_CONNECT,	/* NONE		OTHER	*/
396 		MASTER_SELECT_LOCAL,	/* NONE		LOCAL	*/
397 		MASTER_SELECT_REMOTE	/* NONE		REMOTE	*/
398 	},
399 	{
400 		MASTER_SELECT_WAIT,	/* OTHER	NONE	*/
401 		MASTER_SELECT_CONNECT,	/* OTHER	OTHER	*/
402 		MASTER_SELECT_WAIT,	/* OTHER	LOCAL	*/
403 		MASTER_SELECT_WAIT	/* OTHER	REMOTE	*/
404 	},
405 	{
406 		MASTER_SELECT_LOCAL,	/* LOCAL	NONE	*/
407 		MASTER_SELECT_CONNECT,	/* LOCAL	OTHER	*/
408 		MASTER_SELECT_LOCAL,	/* LOCAL	LOCAL	*/
409 		MASTER_SELECT_VOTE_RCFG	/* LOCAL	REMOTE	*/
410 	},
411 	{
412 		MASTER_SELECT_REMOTE,	/* REMOTE	NONE	*/
413 		MASTER_SELECT_CONNECT,	/* REMOTE	OTHER	*/
414 		MASTER_SELECT_ERROR,	/* REMOTE	LOCAL	*/
415 		MASTER_SELECT_REMOTE	/* REMOTE	REMOTE	*/
416 	}
417 };
418 
419 void
420 idn_assign_cookie(int domid)
421 {
422 	static ushort_t	num = 0;
423 	ushort_t	cookie;
424 	procname_t	proc = "idn_assign_cookie";
425 
426 	if ((cookie = idn_domain[domid].dcookie_recv) != 0)
427 		return;
428 
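	/*
	 * Seed the cookie from the domain structure's address and
	 * XOR in a rolling counter until a non-zero value results.
	 */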
429 	cookie = (ushort_t)(((uint64_t)&idn_domain[domid] >> 8) & 0xff);
430 	while ((cookie ^= num++ & 0xff) == 0)
431 		;
432 
433 	PR_PROTO("%s:%d: assigned RECV cookie 0x%x\n", proc, domid, cookie);
434 
435 	idn_domain[domid].dcookie_recv = cookie;
436 }
437 
438 void
439 idn_update_priority(int domid, int pri)
440 {
441 	idn_domain_t	*dp;
442 	procname_t	proc = "idn_update_priority";
443 
444 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
445 
446 	dp = &idn_domain[domid];
447 
448 	if (pri >= IDNVOTE_MINPRI) {
449 		dp->dvote.v.priority = pri & IDNVOTE_PRI_MASK;
450 
451 		PR_PROTO("%s:%d: SETTING PRIORITY to req(%d) "
452 			"(localpri = 0x%x)\n",
453 			proc, domid, pri, IDNVOTE_PRIVALUE(dp->dvote));
454 	} else {
455 		PR_PROTO("%s:%d: PRIORITIES UNCHANGED (pri = 0x%x)\n",
456 			proc, domid, IDNVOTE_PRIVALUE(dp->dvote));
457 	}
458 }
459 
460 /*
461  * Initiate a link between the local domain and the remote domain
462  * containing the given cpuid.
463  */
464 int
465 idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
466 {
467 	int		rv;
468 	idn_domain_t	*dp;
469 	void		*opcookie;
470 	procname_t	proc = "idn_link";
471 
472 	if ((cpuid < 0) || (cpuid >= NCPU)) {
473 		cmn_err(CE_WARN,
474 			"IDN: 201: (LINK) invalid CPU ID (%d)", cpuid);
475 		return (EINVAL);
476 	}
477 	if (waittime < 0) {
478 		cmn_err(CE_WARN,
479 			"IDN: 202: (LINK) invalid time-out value (%d)",
480 			waittime);
481 		return (EINVAL);
482 	}
483 	if (!VALID_DOMAINID(domid)) {
484 		cmn_err(CE_WARN,
485 			"IDN: 203: (LINK) invalid domain ID (%d)",
486 			domid);
487 		return (EINVAL);
488 	}
489 	if (domid == idn.localid)
490 		return (0);
491 
492 	IDN_SYNC_LOCK();
493 	IDN_DLOCK_EXCL(domid);
494 
495 	dp = &idn_domain[domid];
496 
497 	switch (dp->dstate) {
498 	case IDNDS_CLOSED:
499 		break;
500 
501 	case IDNDS_CONNECTED:
502 #ifdef DEBUG
503 		cmn_err(CE_NOTE,
504 			"!IDN: domain %d (CPU ID %d) already connected",
505 			domid, cpuid);
506 #endif /* DEBUG */
507 		IDN_DUNLOCK(domid);
508 		IDN_SYNC_UNLOCK();
509 		return (0);
510 
511 	default:
512 		cmn_err(CE_WARN,
513 			"IDN: 204: domain %d state (%s) inappropriate",
514 			domid, idnds_str[dp->dstate]);
515 		IDN_DUNLOCK(domid);
516 		IDN_SYNC_UNLOCK();
517 		return (EINVAL);
518 	}
519 
520 	rv = idn_open_domain(domid, cpuid, 0);
521 	if (rv != 0) {
522 		cmn_err(CE_WARN,
523 			"IDN: 205: (%s) failed to open-domain(%d,%d)",
524 			proc, domid, cpuid);
525 		IDN_DUNLOCK(domid);
526 		IDN_SYNC_UNLOCK();
527 		return (EIO);
528 	}
529 
530 
531 	IDN_DLOCK_EXCL(idn.localid);
532 	idn_update_priority(idn.localid, pri);
533 	IDN_DUNLOCK(idn.localid);
534 
535 	if (waittime > 0)
536 		opcookie = idn_init_op(IDNOP_CONNECTED, DOMAINSET(domid), sep);
537 
538 	idn_connect(domid);
539 
540 	IDN_DUNLOCK(domid);
541 	IDN_SYNC_UNLOCK();
542 
543 	PR_PROTO("%s:%d: ALLOCATED idn_link(%d)\n", proc, domid, cpuid);
544 
545 	if (waittime > 0) {
546 		boardset_t	domset = 0;
547 		/*
548 		 * Well we've successfully allocated a domain id,
549 		 * but the link may not be fully established yet.
550 		 * Need to wait since it happens asynchronously.
551 		 */
552 		PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
553 			proc, domid, idnop_str[IDNOP_CONNECTED],
554 			DOMAINSET(domid));
555 
556 		rv = idn_wait_op(opcookie, &domset, waittime);
557 	}
558 
559 #ifdef DEBUG
560 	if (rv == 0) {
561 		if (waittime > 0) {
562 			PR_PROTO("%s:%d: connect SUCCEEDED (cpu %d)\n",
563 					proc, domid, cpuid);
564 		} else {
565 			PR_PROTO("%s:%d: connect KICKED OFF (cpu %d)\n",
566 					proc, domid, cpuid);
567 		}
568 	} else {
569 		PR_PROTO("%s:%d: connect FAILED (cpu %d)\n",
570 				proc, domid, cpuid);
571 	}
572 #endif /* DEBUG */
573 
574 	return (rv);
575 }
576 
577 /*
578  * Unlink the given domain from any domain cluster of
579  * which it might be a member.  Force indicates that the domain
580  * should not go AWOL and, if it's currently AWOL, that it
581  * should be closed and removed.
582  * IMPORTANT: If the (hard) force flag is set, the caller is
583  *	      assumed to GUARANTEE that the given domain will
584  *	      not attempt to communicate with the local domain
585  *	      in any manner.
586  */
587 int
588 idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
589 		idn_finopt_t finopt, int waittime, idnsb_error_t *sep)
590 {
591 	int		rv = 0;
592 	domainset_t	domset;
593 	void		*opcookie;
594 	procname_t	proc = "idn_unlink";
595 
596 
597 	if (waittime < 0) {
598 		cmn_err(CE_WARN,
599 			"IDN: 202: (UNLINK) invalid time-out value (%d)",
600 			waittime);
601 		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_WTIME);
602 		SET_IDNKERR_PARAM0(sep, waittime);
603 		return (EINVAL);
604 	}
605 	if (!VALID_DOMAINID(domid)) {
606 		cmn_err(CE_WARN,
607 			"IDN: 203: (UNLINK) invalid domain ID (%d)",
608 			domid);
609 		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_DOMAIN);
610 		SET_IDNKERR_PARAM0(sep, domid);
611 		SET_IDNKERR_PARAM1(sep, -1);
612 		return (EINVAL);
613 	}
614 	if (idn.localid == IDN_NIL_DOMID) {
615 #ifdef DEBUG
616 		cmn_err(CE_NOTE,
617 			"!IDN: %s: local domain not connected to an IDNnet",
618 			proc);
619 #endif /* DEBUG */
620 		return (0);
621 	}
622 
623 	/*
624 	 * The lock ordering protocol requires that we grab the
625 	 * global lock _before_ the local domain's lock.
626 	 * However, non-local domains must have their lock
627 	 * grabbed _before_ the global lock.
628 	 */
629 	IDN_SYNC_LOCK();
630 	IDN_GLOCK_EXCL();
631 	domset = idn.domset.ds_trans_on | idn.domset.ds_trans_off;
632 	if ((idn.state == IDNGS_OFFLINE) && !domset) {
633 #ifdef DEBUG
634 		cmn_err(CE_WARN,
635 			"!IDN: %s: local domain not connected to an IDNnet",
636 			proc);
637 #endif /* DEBUG */
638 		IDN_GUNLOCK();
639 		IDN_SYNC_UNLOCK();
640 		return (0);
641 	}
642 
643 	if ((domid == IDN_NIL_DOMID) || (domid == idn.localid)) {
644 		domid = idn.localid;
645 		IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
646 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
647 		domset = DOMAINSET_ALL;
648 		DOMAINSET_DEL(domset, idn.localid);
649 	} else {
650 		domset = DOMAINSET(domid);
651 	}
652 	IDN_GUNLOCK();
653 
654 	if (waittime > 0)
655 		opcookie = idn_init_op(IDNOP_DISCONNECTED, domset, sep);
656 
657 	idn_unlink_domainset(domset, fintype, IDNFIN_ARG_NONE, finopt, idnset);
658 
659 	IDN_SYNC_UNLOCK();
660 
661 	if (waittime > 0) {
662 		/*
663 		 * Well the unlink has successfully kicked off.
664 		 * Since the process is asynchronous we need to wait
665 		 * for it to complete.
666 		 */
667 		PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
668 			proc, domid, idnop_str[IDNOP_DISCONNECTED],
669 			domset);
670 
671 		rv = idn_wait_op(opcookie, &domset, waittime);
672 	}
673 
674 	if (rv == 0) {
675 		if (waittime > 0) {
676 			PR_PROTO("%s:%d: disconnect SUCCEEDED\n",
677 				proc, domid);
678 		} else {
679 			PR_PROTO("%s:%d: disconnect KICKED OFF\n",
680 				proc, domid);
681 		}
682 	} else {
683 		PR_PROTO("%s:%d: disconnect FAILED\n", proc, domid);
684 	}
685 
686 	return (rv);
687 }
688 
689 static void
690 idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
691 			idn_finarg_t finarg, idn_finopt_t finopt,
692 			boardset_t idnset)
693 {
694 	int		d;
695 	domainset_t	offset;
696 	procname_t	proc = "idn_unlink_domainset";
697 
698 	ASSERT(IDN_SYNC_IS_LOCKED());
699 
700 	/*
701 	 * Determine the subset for which we have
702 	 * no active connections.
703 	 */
704 	offset = domset & ~(idn.domset.ds_trans_on |
705 				idn.domset.ds_connected |
706 				idn.domset.ds_trans_off |
707 				idn.domset.ds_relink);
708 	/*
709 	 * Determine the subset of domains that are real candidates.
710 	 * Note that we include those already down the path
711 	 * since it's possible a request came in to upgrade
712 	 * their fintype (e.g. NORMAL->FORCE_SOFT).
713 	 */
714 	domset &= ~offset;
715 
716 	if (offset)
717 		idn_update_op(IDNOP_DISCONNECTED, offset, NULL);
718 
719 	IDN_GLOCK_EXCL();
720 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
721 		/*
722 		 * Don't add domains already transitioning off.
723 		 * If they caught on an earlier Reconfig wave then
724 		 * they'll already be in ds_relink anyway.  Otherwise,
725 		 * once a domain is transitioning off we can't upgrade
726 		 * him to a RELINK.
727 		 */
728 #ifdef DEBUG
729 		if (idn.domset.ds_hitlist & domset) {
730 			PR_HITLIST("%s: domset=%x, hitlist=%x, trans_off=%x "
731 				"-> relink = %x -> %x\n",
732 				proc, domset, idn.domset.ds_hitlist,
733 				idn.domset.ds_relink, idn.domset.ds_trans_off,
734 				idn.domset.ds_relink |
735 					(domset & ~idn.domset.ds_trans_off));
736 		}
737 #endif /* DEBUG */
738 
739 		domset &= ~idn.domset.ds_trans_off;
740 		idn.domset.ds_relink |= domset;
741 	} else {
742 		idn.domset.ds_relink &= ~domset;
743 	}
744 	/*
745 	 * Update the ds_trans_on/off so we don't waste
746 	 * time talking to these folks.
747 	 */
748 	idn.domset.ds_trans_on  &= ~domset;
749 	idn.domset.ds_trans_off |= domset;
750 
751 	if (domset == 0) {
752 		if ((idn.domset.ds_trans_on |
753 				idn.domset.ds_connected |
754 				idn.domset.ds_trans_off |
755 				idn.domset.ds_relink) == 0) {
756 			PR_HITLIST("%s:%x: HITLIST %x -> 0\n",
757 				proc, domset, idn.domset.ds_hitlist);
758 			idn.domset.ds_hitlist = 0;
759 			IDN_GSTATE_TRANSITION(IDNGS_OFFLINE);
760 		}
761 		IDN_GUNLOCK();
762 		return;
763 	}
764 	IDN_GUNLOCK();
765 
766 	for (d = 0; d < MAX_DOMAINS; d++) {
767 		idn_domain_t	*dp;
768 		idn_fin_t	ftype;
769 
770 		if (!DOMAIN_IN_SET(domset, d))
771 			continue;
772 
773 		dp = &idn_domain[d];
774 		IDN_DLOCK_EXCL(d);
775 		IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
776 					idn.domset.ds_relink);
777 		ftype = fintype;
778 		if ((dp->dcpu != IDN_NIL_DCPU) && dp->dhw.dh_boardset) {
779 			/*
780 			 * If domain is not in the IDNSET passed
781 			 * down then we need to upgrade this to
782 			 * hard-force in order to prevent possible
783 			 * system failures (arbstop).  This is simply
784 			 * extra protection beyond that checked by
785 			 * the SSP.  IDNSET contains the set of boards
786 			 * that have a "link" to the local domain,
787 			 * including the SMD regs.
788 			 */
789 			if ((idnset & dp->dhw.dh_boardset) == 0) {
790 				PR_PROTO("%s:%d: boardset 0x%x "
791 					"NOT in IDNSET 0x%x\n",
792 					proc, d, dp->dhw.dh_boardset,
793 					idnset);
794 				if (ftype != IDNFIN_FORCE_HARD)
795 					cmn_err(CE_NOTE,
796 						"!IDN: 222: no IDN linkage "
797 						"found (b=0x%x, i=0x%x) "
798 						"upgrading unlink %s to %s",
799 						dp->dhw.dh_boardset,
800 						idnset, idnfin_str[ftype],
801 						idnfin_str[IDNFIN_FORCE_HARD]);
802 
803 				ftype = IDNFIN_FORCE_HARD;
804 			} else {
805 				PR_PROTO("%s:%d: boardset 0x%x "
806 					"FOUND in IDNSET 0x%x\n",
807 					proc, d, dp->dhw.dh_boardset,
808 					idnset);
809 			}
810 		}
811 		idn_disconnect(d, ftype, finarg, IDNDS_SYNC_TYPE(dp));
812 		IDN_DUNLOCK(d);
813 	}
814 }
815 
816 /*
817  * Return w/locks held.
818  */
819 static int
820 idn_connect(int domid)
821 {
822 	idn_xdcargs_t	xargs;
823 	idn_domain_t	*dp = &idn_domain[domid];
824 	procname_t	proc = "idn_connect";
825 
826 	ASSERT(IDN_SYNC_IS_LOCKED());
827 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
828 
829 	ASSERT(dp->dcpu != IDN_NIL_DCPU);
830 
831 	if (dp->dstate != IDNDS_CLOSED) {
832 		if (DOMAIN_IN_SET(idn.domset.ds_trans_on |
833 				idn.domset.ds_connected, domid)) {
834 			PR_PROTO("%s:%d: already connected or "
835 				"in-progress\n", proc, domid);
836 		} else {
837 			PR_PROTO("%s:%d: current state (%s) != "
838 				"CLOSED\n", proc, domid,
839 				idnds_str[dp->dstate]);
840 		}
841 		return (-1);
842 	}
843 
844 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_connected, domid));
845 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_trans_off, domid));
846 
847 	dp->dxp = &xphase_nego;
848 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
849 
850 	idn_xphase_transition(domid, NULL, xargs);
851 
852 	return (0);
853 }
854 
855 /*
856  * Return w/locks held.
857  */
858 static int
859 idn_disconnect(int domid, idn_fin_t fintype, idn_finarg_t finarg,
860 		idn_finsync_t finsync)
861 {
862 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
863 	uint_t		token;
864 	uint_t		finmaster;
865 	idn_xdcargs_t	xargs;
866 	idn_finopt_t	finopt;
867 	idn_domain_t	*dp = &idn_domain[domid];
868 	procname_t	proc = "idn_disconnect";
869 
870 	ASSERT(IDN_SYNC_IS_LOCKED());
871 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
872 
873 	if (dp->dstate == IDNDS_CLOSED) {
874 		PR_PROTO("%s:%d: already CLOSED\n", proc, domid);
875 		idn_update_op(IDNOP_DISCONNECTED, DOMAINSET(domid), NULL);
876 		return (-1);
877 	}
878 
879 	/*
880 	 * Terminate any outstanding commands that were
881 	 * targeted towards this domain.
882 	 */
883 	idn_terminate_cmd(domid, ECANCELED);
884 
885 	/*
886 	 * Terminate any and all retries that may have
887 	 * outstanding for this domain.
888 	 */
889 	token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
890 	(void) idn_retry_terminate(token);
891 
892 	/*
893 	 * Stop all outstanding message timers for
894 	 * this guy.
895 	 */
896 	IDN_MSGTIMER_STOP(domid, 0, 0);
897 
898 	dp->dxp = &xphase_fin;
899 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
900 	if ((int)dp->dfin < (int)fintype) {
901 		/*
902 		 * You can only upgrade a fin type.
903 		 * We don't allow it to be downgraded
904 		 * as it's too dangerous since some
905 		 * state may have been blown away while
906 		 * we were fin'ing at a higher level.
907 		 */
908 		IDN_FSTATE_TRANSITION(dp, fintype);
909 	}
910 
911 	dp->dfin_sync = finsync;
912 	PR_PROTO("%s:%d: disconnect synchronously = %s\n",
913 		proc, domid, (finsync == IDNFIN_SYNC_OFF) ? "OFF" :
914 		(finsync == IDNFIN_SYNC_NO) ? "NO" : "YES");
915 
916 	IDN_GLOCK_SHARED();
917 	if (DOMAIN_IN_SET(idn.domset.ds_relink, domid) &&
918 			(idn.state != IDNGS_DISCONNECT)) {
919 		finopt = IDNFIN_OPT_RELINK;
920 	} else {
921 		finopt = IDNFIN_OPT_UNLINK;
922 		PR_HITLIST("%s:%d: HITLIST %x -> %x\n",
923 			proc, domid, idn.domset.ds_hitlist,
924 			idn.domset.ds_hitlist | DOMAINSET(domid));
925 		DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
926 	}
927 
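	/*
	 * Build the FIN cross-call arguments: fin type, fin argument,
	 * relink/unlink option, and (below) the new-master hint
	 * encoding the expected master's domain and CPU IDs.
	 */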
928 	CLR_XARGS(xargs);
929 	SET_XARGS_FIN_TYPE(xargs, dp->dfin);
930 	SET_XARGS_FIN_ARG(xargs, finarg);
931 	SET_XARGS_FIN_OPT(xargs, finopt);
932 	SET_XARGS_FIN_DOMSET(xargs, 0);		/* unused when msg = 0 */
933 	new_masterid = IDN_GET_NEW_MASTERID();
934 	IDN_GUNLOCK();
935 	if (new_masterid != IDN_NIL_DOMID)
936 		new_cpuid = idn_domain[new_masterid].dcpu;
937 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
938 	SET_XARGS_FIN_MASTER(xargs, finmaster);
939 
940 	idn_xphase_transition(domid, NULL, xargs);
941 
942 	return (0);
943 }
944 
945 static int
946 idn_next_xstate(idn_xstate_t o_xstate, int err, uint_t msg)
947 {
948 	int		index;
949 	procname_t	proc = "idn_next_xstate";
950 
951 	ASSERT(((int)o_xstate >= 0) && ((int)o_xstate <= 4));
952 
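	/*
	 * Map the message bits to the event column of idnxs_state_table:
	 * 0 = no message, 1 = msg only, 2 = msg+ack, 3 = ack only,
	 * 4 = nack.  Anything else is invalid (-1).
	 */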
953 	if (!msg)
954 		index = 0;
955 	else if ((msg & IDNP_MSGTYPE_MASK) == 0)
956 		index = (msg & IDNP_ACK) ? 3 : (msg & IDNP_NACK) ? 4 : -1;
957 	else
958 		index = (msg & IDNP_ACK) ? 2 :
959 				!(msg & IDNP_ACKNACK_MASK) ? 1 : -1;
960 
961 	if (index == -1) {
962 		STRING(str);
963 
964 		INUM2STR(msg, str);
965 		PR_PROTO("%s: (msg = 0x%x(%s))\n", proc, msg, str);
966 		return (IDNXS_NIL);
967 	}
968 
969 	if (err == -1) {
970 		int	n_xstate;
971 		/*
972 		 * Caller is just interested in querying whether this
973 		 * is a valid message to receive in the current
974 		 * xstate.  A return value of IDNXS_NIL indicates
975 		 * that it's not.  A return value of non-IDNXS_NIL
976 		 * indicates it's cool.  An invalid message is
977 		 * determined by both err & !err states being IDNXS_NIL.
978 		 */
979 		n_xstate = idnxs_state_table[(int)o_xstate][index][0];
980 		if (n_xstate != IDNXS_NIL)
981 			return (n_xstate);
982 		else
983 			return (idnxs_state_table[(int)o_xstate][index][1]);
984 	} else {
985 		return (idnxs_state_table[(int)o_xstate][index][err ? 1 : 0]);
986 	}
987 }
988 
989 static int
990 idn_select_candidate(domainset_t master_set)
991 {
992 	int		d, best_id = IDN_NIL_DOMID;
993 	uint_t		best_vote = 0;
994 	idn_domain_t	*dp;
995 	procname_t	proc = "idn_select_candidate";
996 
997 	ASSERT(IDN_SYNC_IS_LOCKED());
998 
999 	if (master_set == 0) {
1000 		PR_PROTO("%s: %x -> %d\n", proc, master_set, IDN_NIL_DOMID);
1001 		return (IDN_NIL_DOMID);
1002 	}
1003 
1004 	for (d = 0; d < MAX_DOMAINS; d++) {
1005 		uint_t		vote;
1006 		idn_vote_t	v;
1007 
1008 		if (!DOMAIN_IN_SET(master_set, d))
1009 			continue;
1010 
1011 		dp = &idn_domain[d];
1012 
1013 		if ((dp->domid == IDN_NIL_DOMID) ||
1014 			(dp->dcpu == IDN_NIL_DCPU) ||
1015 			((v.ticket = dp->dvote.ticket) == 0))
1016 			continue;
1017 
1018 		vote = IDNVOTE_ELECT(v);
1019 
1020 		if (vote > best_vote) {
1021 			best_vote = vote;
1022 			best_id = d;
1023 		}
1024 	}
1025 
1026 	PR_PROTO("%s: %x -> %d\n", proc, master_set, best_id);
1027 
1028 	return (best_id);
1029 }
1030 
1031 /*
1032  * If a non-zero value is returned then GLOCK will have been dropped.
1033  * Otherwise, routine returns with all incoming locks still held.
1034  */
1035 static int
1036 idn_select_master(int domid, int rmasterid, int rcpuid)
1037 {
1038 	char		*sel;
1039 	int		lmasterid, masterid;
1040 	int		do_reconfig = 0;
1041 	int		lindex, rindex;
1042 	idn_domain_t	*ldp, *rdp;
1043 	uint_t		rvote, lvote;
1044 	idn_master_select_t	select;
1045 	procname_t	proc = "idn_select_master";
1046 
1047 	ASSERT(IDN_SYNC_IS_LOCKED());
1048 	ASSERT(IDN_GLOCK_IS_EXCL());
1049 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1050 
1051 	PR_PROTO("%s:%d: lmasterid = %d, rmasterid = %d, rcpuid = %d\n",
1052 		proc, domid, IDN_GET_MASTERID(), rmasterid, rcpuid);
1053 
1054 	IDN_DLOCK_EXCL(idn.localid);
1055 
1056 	ldp = &idn_domain[idn.localid];
1057 	rdp = &idn_domain[domid];
1058 
1059 	/*
1060 	 * Clear master bits since mastership is derived from
1061 	 * other information (local/remote idn.masterid/idn.new_masterid)
1062 	 * and we don't want the vote master bit to confuse matters.
1063 	 */
1064 	lvote = IDNVOTE_ELECT(ldp->dvote);
1065 	rvote = IDNVOTE_ELECT(rdp->dvote);
1066 
1067 	lmasterid = IDN_GET_MASTERID();
1068 
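	/*
	 * Classify each side's current master as NONE, LOCAL, REMOTE,
	 * or OTHER (all relative to the local domain) to index
	 * master_select_table[] below.
	 */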
1069 	lindex = (lmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
1070 			(lmasterid == idn.localid) ? MASTER_IS_LOCAL :
1071 			(lmasterid == domid) ? MASTER_IS_REMOTE :
1072 			MASTER_IS_OTHER;
1073 
1074 	rindex = (rmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
1075 			(rmasterid == domid) ? MASTER_IS_REMOTE :
1076 			(rmasterid == idn.localid) ? MASTER_IS_LOCAL :
1077 			MASTER_IS_OTHER;
1078 
1079 	select = master_select_table[lindex][rindex];
1080 
1081 	masterid = IDN_NIL_DOMID;
1082 
1083 	/*
1084 	 * Each case is responsible for dropping DLOCK(localid)
1085 	 * and GLOCK if it doesn't select a master, unless a
1086 	 * reconfig is necessary.
1087 	 */
1088 	switch (select) {
1089 	case MASTER_SELECT_VOTE_RCFG:
1090 		sel = "VOTE_RECONFIG";
1091 		if (lvote > rvote) {
1092 			/*
1093 			 * If the local domain is the winner then the remote
1094 			 * domain will have to Reconfig.  We'll continue
1095 			 * through the connection process anyway.  The
1096 			 * remote domain will tell us to back off while it
1097 			 * Reconfigs, but that's okay as we'll keep retrying.
1098 			 */
1099 			masterid = idn.localid;
1100 		} else if (lvote < rvote) {
1101 			do_reconfig = 1;
1102 			/*
1103 			 * GLOCK will get dropped once reconfig
1104 			 * is kicked off.
1105 			 */
1106 		} else {
1107 			cmn_err(CE_WARN,
1108 				"IDN: 206: cannot link domains "
1109 				"with equal votes (L(%d),R(%d),0x%x)",
1110 				idn.localid, domid, rvote);
1111 			IDN_GUNLOCK();
1112 		}
1113 		IDN_DUNLOCK(idn.localid);
1114 		break;
1115 
1116 	case MASTER_SELECT_VOTE:
1117 		sel = "VOTE";
1118 		if (lvote > rvote) {
1119 			masterid = idn.localid;
1120 			ldp->dvote.v.master = 1;
1121 			rdp->dvote.v.master = 0;
1122 		} else if (lvote < rvote) {
1123 			masterid = domid;
1124 			ldp->dvote.v.master = 0;
1125 			rdp->dvote.v.master = 1;
1126 		} else {
1127 			cmn_err(CE_WARN,
1128 				"IDN: 206: cannot link domains "
1129 				"with equal votes (L(%d),R(%d),0x%x)",
1130 				idn.localid, domid, rvote);
1131 		}
1132 		ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
1133 		if (masterid != IDN_NIL_DOMID) {
1134 			IDN_SET_MASTERID(masterid);
1135 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1136 		} else {
1137 			IDN_GUNLOCK();
1138 		}
1139 		IDN_DUNLOCK(idn.localid);
1140 		break;
1141 
1142 	case MASTER_SELECT_REMOTE:
1143 		sel = "REMOTE";
1144 		masterid = domid;
1145 		if (IDN_GET_MASTERID() == IDN_NIL_DOMID) {
1146 			IDN_SET_MASTERID(masterid);
1147 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1148 			ldp->dvote.v.master = 0;
1149 			rdp->dvote.v.master = 1;
1150 		}
1151 		ASSERT(IDN_GET_MASTERID() == domid);
1152 		IDN_DUNLOCK(idn.localid);
1153 		break;
1154 
1155 	case MASTER_SELECT_LOCAL:
1156 		sel = "LOCAL";
1157 		masterid = idn.localid;
1158 		if (IDN_GET_MASTERID() == IDN_NIL_DOMID) {
1159 			IDN_SET_MASTERID(masterid);
1160 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1161 			ldp->dvote.v.master = 1;
1162 			rdp->dvote.v.master = 0;
1163 		}
1164 		ASSERT(IDN_GET_MASTERID() == idn.localid);
1165 		IDN_DUNLOCK(idn.localid);
1166 		break;
1167 
1168 	case MASTER_SELECT_CONNECT:
1169 		sel = "CONNECT";
1170 		if (rmasterid == lmasterid) {
1171 			/*
1172 			 * Local and remote have same master,
1173 			 * let him come onboard.
1174 			 */
1175 			masterid = lmasterid;
1176 			IDN_DUNLOCK(idn.localid);
1177 
1178 		} else {
1179 			int	rv;
1180 
1181 			IDN_DUNLOCK(idn.localid);
1182 			IDN_GUNLOCK();
1183 			IDN_DLOCK_EXCL(rmasterid);
1184 			PR_PROTO("%s:%d: attempting connect w/remote "
1185 				"master %d\n",
1186 				proc, domid, rmasterid);
1187 			rv = idn_open_domain(rmasterid, rcpuid, 0);
1188 			if (rv == 0) {
1189 				idn_connect(rmasterid);
1190 			} else if (rv < 0) {
1191 				cmn_err(CE_WARN,
1192 					"IDN: 205: (%s) failed to "
1193 					"open-domain(%d,%d)",
1194 					proc, rmasterid, rcpuid);
1195 			} else {
1196 				/*
1197 				 * Must already have a connection going.
1198 				 */
1199 				PR_PROTO("%s:%d: failed "
1200 					"idn_open_domain(%d,%d,0) "
1201 					"(rv = %d)\n",
1202 					proc, domid, rmasterid,
1203 					rcpuid, rv);
1204 			}
1205 			IDN_DUNLOCK(rmasterid);
1206 		}
1207 		break;
1208 
1209 	case MASTER_SELECT_WAIT:
1210 		sel = "WAIT";
1211 		/*
1212 		 * If the remote domain has the same master as the local
1213 		 * domain then there's no need to wait.
1214 		 */
1215 		if (rmasterid == lmasterid) {
1216 			masterid = lmasterid;
1217 		} else {
1218 			IDN_GUNLOCK();
1219 		}
1220 		IDN_DUNLOCK(idn.localid);
1221 		break;
1222 
1223 	case MASTER_SELECT_ERROR:
1224 		sel = "ERROR";
1225 		/*
1226 		 * Hit impossible condition.
1227 		 */
1228 		cmn_err(CE_WARN,
1229 			"IDN: 207: local/remote master-id conflict "
1230 			"(%d.lmasterid = %d, %d.rmasterid = %d)",
1231 			idn.localid, lmasterid, domid, rmasterid);
1232 		IDN_GUNLOCK();
1233 		IDN_DUNLOCK(idn.localid);
1234 		break;
1235 
1236 	default:
1237 		cmn_err(CE_WARN,
1238 			"IDN: 208: %s: unknown case (%d)",
1239 			proc, (int)select);
1240 		IDN_GUNLOCK();
1241 		IDN_DUNLOCK(idn.localid);
1242 		ASSERT(0);
1243 		break;
1244 	}
1245 
1246 	if (masterid == IDN_NIL_DOMID) {
1247 		PR_PROTO("%s:%d: NO MASTER SELECTED (rmstr=%d) sel=%s\n",
1248 			proc, domid, rmasterid, sel);
1249 	} else {
1250 		PR_PROTO("%s:%d: MASTER SELECTED = %d (%s)\n",
1251 			proc, domid, masterid,
1252 			(masterid == idn.localid) ? "LOCAL" :
1253 			(masterid == domid) ? "REMOTE" : "OTHER");
1254 	}
1255 
1256 	if (do_reconfig) {
1257 		domainset_t	dis_set;
1258 
1259 		/*
1260 		 * Local domain already has a master.
1261 		 * Need to dismantle all connections
1262 		 * and reestablish one with the new master.
1263 		 */
1264 		IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
1265 
1266 		PR_PROTO("%s:%d: RECONFIG new masterid = %d\n",
1267 				proc, domid, domid);
1268 
1269 		IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
1270 		IDN_SET_NEW_MASTERID(domid);
1271 		IDN_GUNLOCK();
1272 
1273 		dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
1274 		DOMAINSET_DEL(dis_set, domid);
1275 
1276 		idn_unlink_domainset(dis_set, IDNFIN_NORMAL,
1277 					IDNFIN_ARG_NONE, IDNFIN_OPT_RELINK,
1278 					BOARDSET_ALL);
1279 	}
1280 
1281 	return ((masterid == IDN_NIL_DOMID) ? -1 : 0);
1282 }
1283 
1284 /*ARGSUSED1*/
1285 static void
1286 idn_retry_query(uint_t token, void *arg)
1287 {
1288 	idn_retry_t	rtype = IDN_RETRY_TOKEN2TYPE(token);
1289 	int		d, domid = IDN_RETRY_TOKEN2DOMID(token);
1290 	idn_domain_t	*dp = &idn_domain[domid];
1291 	idn_synccmd_t	sync_cmd;
1292 	domainset_t	query_set, my_ready_set;
1293 	procname_t	proc = "idn_retry_query";
1294 
1295 	IDN_SYNC_LOCK();
1296 	IDN_DLOCK_EXCL(domid);
1297 
1298 	switch (rtype) {
1299 	case IDNRETRY_CONQ:
1300 		sync_cmd = IDNSYNC_CONNECT;
1301 		my_ready_set = idn.domset.ds_ready_on |
1302 				idn.domset.ds_connected;
1303 		my_ready_set &= ~idn.domset.ds_trans_off;
1304 		DOMAINSET_ADD(my_ready_set, idn.localid);
1305 		break;
1306 
1307 	case IDNRETRY_FINQ:
1308 		sync_cmd = IDNSYNC_DISCONNECT;
1309 		my_ready_set = idn.domset.ds_ready_off |
1310 				~idn.domset.ds_connected;
1311 		break;
1312 
1313 	default:
1314 		IDN_DUNLOCK(domid);
1315 		IDN_SYNC_UNLOCK();
1316 		return;
1317 	}
1318 
1319 	if (dp->dsync.s_cmd == sync_cmd)
1320 		my_ready_set |= dp->dsync.s_set_rdy;
1321 
1322 	query_set = idn_sync_register(domid, sync_cmd, 0, IDNSYNC_REG_QUERY);
1323 
1324 	PR_PROTO("%s:%d: query_set = 0x%x\n", proc, domid, query_set);
1325 
1326 	if (query_set == 0) {
1327 		IDN_DUNLOCK(domid);
1328 		IDN_SYNC_UNLOCK();
1329 		return;
1330 	}
1331 
1332 	for (d = 0; d < MAX_DOMAINS; d++) {
1333 		if (!DOMAIN_IN_SET(query_set, d))
1334 			continue;
1335 
1336 		dp = &idn_domain[d];
1337 		if (d != domid)
1338 			IDN_DLOCK_EXCL(d);
1339 
1340 		if ((dp->dsync.s_cmd == sync_cmd) ||
1341 				(!dp->dcookie_send &&
1342 				(rtype == IDNRETRY_CONQ))) {
1343 			if (d != domid)
1344 				IDN_DUNLOCK(d);
1345 			continue;
1346 		}
1347 
1348 		IDN_SYNC_QUERY_UPDATE(domid, d);
1349 
1350 		if (rtype == IDNRETRY_CONQ)
1351 			idn_send_con(d, NULL, IDNCON_QUERY, my_ready_set);
1352 		else
1353 			idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
1354 					IDNFIN_OPT_NONE, my_ready_set,
1355 					NIL_FIN_MASTER);
1356 		if (d != domid)
1357 			IDN_DUNLOCK(d);
1358 	}
1359 
1360 	IDN_DUNLOCK(domid);
1361 	IDN_SYNC_UNLOCK();
1362 }
1363 
1364 static int
1365 idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
1366 {
1367 	idn_domain_t	*ldp, *dp;
1368 	int		d, masterid;
1369 	uint_t		dmask;
1370 	uint_t		acknack;
1371 	uint_t		ticket;
1372 	idnneg_dset_t	dset;
1373 	idn_msgtype_t	mt;
1374 	procname_t	proc = "idn_send_nego";
1375 
1376 	ASSERT(IDN_SYNC_IS_LOCKED());
1377 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1378 
1379 	if (mtp) {
1380 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
1381 		mt.mt_mtype = mtp->mt_mtype;
1382 		mt.mt_atype = mtp->mt_atype;
1383 		mt.mt_cookie = mtp->mt_cookie;
1384 	} else {
1385 		acknack = 0;
1386 		mt.mt_mtype = IDNP_NEGO;
1387 		mt.mt_atype = 0;
1388 		mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
1389 	}
1390 
1391 	IDN_GLOCK_SHARED();
1392 
1393 	dp = &idn_domain[domid];
1394 	ldp = &idn_domain[idn.localid];
1395 
1396 	if ((idn.state == IDNGS_RECONFIG) ||
1397 			((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
1398 		masterid = IDN_GET_NEW_MASTERID();
1399 		if ((masterid == idn.localid) || (masterid == domid)) {
1400 			/*
1401 			 * We only send the new-master "hint" to
1402 			 * "other" domains.  If the new-master is
1403 			 * ourself or we're talking to the new-master
1404 			 * then we need to be accurate about our
1405 			 * real master so that the correct master
1406 			 * is selected.
1407 			 */
1408 			masterid = IDN_NIL_DOMID;
1409 		}
1410 	}
1411 
1412 	DOMAINSET_DEL(conset, idn.localid);
1413 	DOMAINSET_DEL(conset, domid);
1414 	/*
1415 	 * Exclude domains from conset that are on
1416 	 * remote domain's hitlist.  He's not interested
1417 	 * in hearing about them.  SSP is probably requesting
1418 	 * such domains be unlinked - will eventually get to
1419 	 * local domain.
1420 	 */
1421 	conset &= ~idn.domset.ds_hitlist;
1422 	if ((masterid != IDN_NIL_DOMID) &&
1423 			DOMAIN_IN_SET(idn.domset.ds_hitlist, masterid)) {
1424 		PR_PROTO("%s:%d: masterid(%d) on hitlist(0x%x) -> -1\n",
1425 			proc, domid, masterid, idn.domset.ds_hitlist);
1426 		/*
1427 		 * Yikes, our chosen master is on the hitlist!
1428 		 */
1429 		masterid = IDN_NIL_DOMID;
1430 	}
1431 
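	/*
	 * Pack the CPU IDs of the domains in conset, along with the
	 * chosen master, into the negotiation dset; it is sent as the
	 * three data words of the NEGO cross-call below.
	 */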
1432 	dmask = IDNNEG_DSET_MYMASK();
1433 	IDNNEG_DSET_INIT(dset, dmask);
1434 	for (d = 0; d < MAX_DOMAINS; d++) {
1435 		int	cpuid;
1436 
1437 		if (!DOMAIN_IN_SET(conset, d))
1438 			continue;
1439 
1440 		if ((cpuid = idn_domain[d].dcpu) == IDN_NIL_DCPU) {
1441 			ASSERT(d != masterid);
1442 			continue;
1443 		}
1444 
1445 		IDNNEG_DSET_SET(dset, d, cpuid, dmask);
1446 	}
1447 	IDNNEG_DSET_SET_MASTER(dset, domid, masterid);
1448 	ASSERT((masterid != IDN_NIL_DOMID) ?
1449 		(idn_domain[masterid].dcpu != IDN_NIL_DCPU) : 1);
1450 	IDN_GUNLOCK();
1451 
1452 	IDN_DLOCK_SHARED(idn.localid);
1453 	ticket = IDNVOTE_BASICS(ldp->dvote);
1454 	/*
1455 	 * We just want to send basic vote components without an
1456 	 * indication of mastership (master bit) since that's primarily
1457 	 * for the local domain's usage.  There are more accurate master
1458 	 * indications in the DSET.  Recall that if we were in a
1459 	 * Reconfig we would have transmitted the "new_masterid"
1460 	 * which might conflict with the local domain's vote.v.master
1461 	 * bit if he was originally the master prior to the Reconfig.
1462 	 */
1463 
1464 	PR_PROTO("%s:%d: sending nego%sto (cpu %d) "
1465 		"[v=0x%x, cs=0x%x, mstr=%d]\n",
1466 		proc, domid,
1467 		(acknack & IDNP_ACK) ? "+ack " :
1468 		(acknack & IDNP_NACK) ? "+nack " : " ",
1469 		dp->dcpu, ticket, conset, masterid);
1470 
1471 	IDN_MSGTIMER_START(domid, IDNP_NEGO, 0,
1472 			idn_msg_waittime[IDNP_NEGO], &mt.mt_cookie);
1473 
1474 	IDNXDC(domid, &mt, ticket, dset[0], dset[1], dset[2]);
1475 
1476 	IDN_DUNLOCK(idn.localid);
1477 
1478 	return (0);
1479 }
1480 
1481 static int
1482 idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
1483 		ushort_t dcookie)
1484 {
1485 	uint_t		msg = mtp->mt_mtype;
1486 	idn_msgtype_t	mt;
1487 	idn_domain_t	*dp = &idn_domain[domid];
1488 	idn_xdcargs_t	nargs;
1489 	procname_t	proc = "idn_recv_nego";
1490 
1491 	ASSERT(IDN_SYNC_IS_LOCKED());
1492 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1493 
1494 	mt.mt_cookie = mtp->mt_cookie;
1495 
1496 #ifdef DEBUG
1497 	if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
1498 		PR_HITLIST("%s:%d: dcpu=%d, dstate=%s, msg=%x, "
1499 			"hitlist=%x\n",
1500 			proc, domid, dp->dcpu, idnds_str[dp->dstate],
1501 			msg, idn.domset.ds_hitlist);
1502 	}
1503 #endif /* DEBUG */
1504 
1505 	if (dp->dcpu == IDN_NIL_DCPU) {
1506 		int		cpuid;
1507 		uint_t		ticket;
1508 		/*
1509 		 * Brand-new link.  Need to open a new domain entry.
1510 		 */
1511 		ticket = GET_XARGS_NEGO_TICKET(xargs);
1512 		cpuid = dp->dcpu_last;
1513 		ASSERT(VALID_CPUID(cpuid));
1514 
1515 		if (idn_open_domain(domid, cpuid, ticket) != 0) {
1516 			PR_PROTO("%s:%d: FAILED to open domain "
1517 				"(ticket = 0x%x)\n",
1518 				proc, domid, ticket);
1519 			return (-1);
1520 		}
1521 	}
1522 
1523 	if ((msg & IDNP_MSGTYPE_MASK) == IDNP_NEGO) {
1524 		PR_PROTO("%s:%d: assigned SEND cookie 0x%x\n",
1525 			proc, domid, dcookie);
1526 		dp->dcookie_send = dcookie;
1527 	}
1528 
1529 	if ((dp->dxp == NULL) && IDNDS_IS_CLOSED(dp)) {
1530 		dp->dxp = &xphase_nego;
1531 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
1532 	} else if (dp->dxp != &xphase_nego) {
1533 		if (msg & IDNP_MSGTYPE_MASK) {
1534 			/*
1535 			 * If we already have a connection to somebody
1536 			 * trying to initiate a connection to us, then
1537 			 * possibly we've awakened from a coma or he did.
1538 			 * In any case, dismantle current connection
1539 			 * and attempt to establish a new one.
1540 			 */
1541 			if (dp->dstate == IDNDS_CONNECTED) {
1542 				DOMAINSET_ADD(idn.domset.ds_relink, domid);
1543 				IDN_HISTORY_LOG(IDNH_RELINK, domid,
1544 						dp->dstate,
1545 						idn.domset.ds_relink);
1546 				idn_disconnect(domid, IDNFIN_NORMAL,
1547 						IDNFIN_ARG_NONE,
1548 						IDNFIN_SYNC_YES);
1549 			} else {
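				/*
				 * Not in a state to accept a new NEGO, so
				 * NACK the request.  Domains on our hitlist
				 * are told to EXIT; others are told to RETRY
				 * along with a hint of which domain we
				 * consider the (new) master.
				 */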
1550 				mt.mt_mtype = IDNP_NACK;
1551 				mt.mt_atype = msg;
1552 
1553 				CLR_XARGS(nargs);
1554 
1555 				if (DOMAIN_IN_SET(idn.domset.ds_hitlist,
1556 							domid)) {
1557 					SET_XARGS_NACK_TYPE(nargs,
1558 							IDNNACK_EXIT);
1559 				} else {
1560 					int	new_masterid;
1561 					int	new_cpuid = IDN_NIL_DCPU;
1562 
1563 					SET_XARGS_NACK_TYPE(nargs,
1564 							IDNNACK_RETRY);
1565 					IDN_GLOCK_SHARED();
1566 					new_masterid = IDN_GET_NEW_MASTERID();
1567 					if (new_masterid == IDN_NIL_DOMID)
1568 						new_masterid =
1569 							IDN_GET_MASTERID();
1570 					if (new_masterid != IDN_NIL_DOMID) {
1571 						idn_domain_t	*mdp;
1572 
1573 						mdp = &idn_domain[new_masterid];
1574 						new_cpuid = mdp->dcpu;
1575 					}
1576 					SET_XARGS_NACK_ARG1(nargs,
1577 								new_masterid);
1578 					SET_XARGS_NACK_ARG2(nargs,
1579 								new_cpuid);
1580 					IDN_GUNLOCK();
1581 				}
1582 				idn_send_acknack(domid, &mt, nargs);
1583 			}
1584 		}
1585 		return (0);
1586 	}
1587 
1588 	idn_xphase_transition(domid, mtp, xargs);
1589 
1590 	return (0);
1591 }
1592 
1593 /*ARGSUSED1*/
1594 static void
1595 idn_retry_nego(uint_t token, void *arg)
1596 {
1597 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
1598 	int		new_masterid;
1599 	idn_domain_t	*dp = &idn_domain[domid];
1600 	idn_xdcargs_t	xargs;
1601 	procname_t	proc = "idn_retry_nego";
1602 
1603 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_NEGO);
1604 
1605 	IDN_SYNC_LOCK();
1606 	IDN_DLOCK_EXCL(domid);
1607 
1608 	if (dp->dxp != &xphase_nego) {
1609 		STRING(str);
1610 
1611 #ifdef DEBUG
1612 		if (dp->dxp) {
1613 			INUM2STR(dp->dxp->xt_msgtype, str);
1614 		}
1615 #endif /* DEBUG */
1616 
1617 		PR_PROTO("%s:%d: dxp(%s) != NEGO...bailing...\n",
1618 			proc, domid, dp->dxp ? str : "NULL");
1619 		IDN_DUNLOCK(domid);
1620 		IDN_SYNC_UNLOCK();
1621 		return;
1622 	}
1623 
1624 	if (dp->dxstate != IDNXS_PEND) {
1625 		PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
1626 			proc, domid, idnxs_str[dp->dxstate],
1627 			idnxs_str[IDNXS_PEND]);
1628 		IDN_DUNLOCK(domid);
1629 		IDN_SYNC_UNLOCK();
1630 		return;
1631 	}
1632 
1633 	IDN_GLOCK_SHARED();
1634 	if (idn.state == IDNGS_RECONFIG) {
1635 		/*
1636 		 * Have to try again later after
1637 		 * reconfig has completed.
1638 		 */
1639 		PR_PROTO("%s:%d: reconfig in-progress...try later\n",
1640 			proc, domid);
1641 		idn_retry_submit(idn_retry_nego, NULL, token,
1642 				idn_msg_retrytime[IDNP_NEGO]);
1643 		IDN_GUNLOCK();
1644 		IDN_DUNLOCK(domid);
1645 		IDN_SYNC_UNLOCK();
1646 		return;
1647 	}
1648 	new_masterid = IDN_GET_NEW_MASTERID();
1649 	if ((idn.state == IDNGS_CONNECT) &&
1650 			(new_masterid != IDN_NIL_DOMID) &&
1651 			(domid != new_masterid) &&
1652 			(idn.localid != new_masterid)) {
1653 		/*
1654 		 * We have a new master pending and this
1655 		 * guy isn't it.  Wait until the local domain
1656 		 * has a chance to connect with the new
1657 		 * master before going forward with this
1658 		 * guy.
1659 		 */
1660 		PR_PROTO("%s:%d: waiting for connect to new master %d\n",
1661 			proc, domid, IDN_GET_NEW_MASTERID());
1662 		idn_retry_submit(idn_retry_nego, NULL, token,
1663 				idn_msg_retrytime[IDNP_NEGO]);
1664 		IDN_GUNLOCK();
1665 		IDN_DUNLOCK(domid);
1666 		IDN_SYNC_UNLOCK();
1667 		return;
1668 	}
1669 	IDN_GUNLOCK();
1670 
1671 	idn_xphase_transition(domid, NULL, xargs);
1672 
1673 	IDN_DUNLOCK(domid);
1674 	IDN_SYNC_UNLOCK();
1675 }
1676 
1677 static int
1678 idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
1679 {
1680 	int		d, new_masterid, masterid;
1681 	int		cpuid, m_cpuid = -1;
1682 	uint_t		dmask;
1683 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
1684 	idn_domain_t	*dp, *ldp;
1685 	domainset_t	con_set, pending_set;
1686 	idnneg_dset_t	dset;
1687 	procname_t	proc = "idn_check_nego";
1688 
1689 	ASSERT(IDN_SYNC_IS_LOCKED());
1690 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1691 
1692 	dp = &idn_domain[domid];
1693 	ldp = &idn_domain[idn.localid];
1694 
1695 	if (msg & IDNP_NACK) {
1696 		if (GET_XARGS_NACK_TYPE(xargs) == IDNNACK_EXIT) {
1697 			PR_HITLIST("%s:%d(%s): (msg=%x) EXIT received, "
1698 				"adding to hitlist %x -> %x\n",
1699 				proc, domid, idnds_str[dp->dstate], msg,
1700 				idn.domset.ds_hitlist,
1701 				idn.domset.ds_hitlist | DOMAINSET(domid));
1702 
1703 			DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
1704 			return (-1);
1705 		} else {
1706 			return (0);
1707 		}
1708 	}
1709 
1710 	if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
1711 		PR_HITLIST("%s:%d(%s): (msg=%x) domain in hitlist (%x) - "
1712 			"exiting phase\n",
1713 			proc, domid, idnds_str[dp->dstate], msg,
1714 			idn.domset.ds_hitlist);
1715 		return (-1);
1716 	}
1717 
1718 	if ((dp->dstate == IDNDS_NEGO_PEND) &&
1719 			(msg & IDNP_MSGTYPE_MASK) &&
1720 			(msg & IDNP_ACK))		/* nego+ack */
1721 		return (1);
1722 
1723 	dmask = (uint_t)-1;
1724 
1725 	IDN_GLOCK_EXCL();
1726 	if (idn.state == IDNGS_DISCONNECT) {
1727 		PR_PROTO("%s:%d: DISCONNECT in-progress >>> EXIT\n",
1728 			proc, domid);
1729 		IDN_GUNLOCK();
1730 		return (-1);
1731 	} else if (idn.state == IDNGS_OFFLINE) {
1732 		IDN_GSTATE_TRANSITION(IDNGS_CONNECT);
1733 		IDN_PREP_HWINIT();
1734 		IDN_DLOCK_EXCL(idn.localid);
1735 		ldp->dvote.v.connected = 0;
1736 		IDN_DUNLOCK(idn.localid);
1737 	}
1738 
1739 	if (!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid)) {
1740 		DOMAINSET_ADD(idn.domset.ds_trans_on, domid);
1741 		IDN_HISTORY_LOG(IDNH_NEGO, domid,
1742 				idn.domset.ds_trans_on,
1743 				idn.domset.ds_connected);
1744 	}
1745 
1746 	switch (idn.state) {
1747 	case IDNGS_RECONFIG:
1748 		PR_PROTO("%s:%d: RECONFIG in-progress >>> RETRY\n",
1749 			proc, domid);
1750 		IDN_GUNLOCK();
1751 		return (1);
1752 
1753 	case IDNGS_CONNECT:
1754 		new_masterid = IDN_GET_NEW_MASTERID();
1755 		if ((new_masterid != IDN_NIL_DOMID) &&
1756 				(domid != new_masterid) &&
1757 				(idn.localid != new_masterid)) {
1758 			PR_PROTO("%s:%d: waiting for connect to "
1759 				"new master %d\n",
1760 				proc, domid, IDN_GET_NEW_MASTERID());
1761 			IDN_GUNLOCK();
1762 			return (1);
1763 		}
1764 		break;
1765 
1766 	default:
1767 		break;
1768 	}
1769 
1770 	ASSERT((idn.state == IDNGS_CONNECT) || (idn.state == IDNGS_ONLINE));
1771 
1772 	con_set = 0;
1773 
1774 	if (msg) {
1775 		idn_domain_t	*mdp;
1776 		idn_vote_t	vote;
1777 
1778 		vote.ticket = GET_XARGS_NEGO_TICKET(xargs);
1779 		/*
1780 		 * Sender should not have set the master bit,
1781 		 * but just in case clear it so the local domain
1782 		 * doesn't get confused.
1783 		 */
1784 		vote.v.master = 0;
1785 		dp->dvote.ticket = vote.ticket;
1786 		GET_XARGS_NEGO_DSET(xargs, dset);
1787 		/*LINTED*/
1788 		IDNNEG_DSET_GET_MASK(dset, domid, dmask);
1789 		IDNNEG_DSET_GET_MASTER(dset, new_masterid);
1790 		if (new_masterid == IDNNEG_NO_MASTER) {
1791 			new_masterid = IDN_NIL_DOMID;
1792 		} else {
1793 			/*
1794 			 * Remote domain has a master.  Find
1795 			 * his cpuid in the dset.  We may need
1796 			 * it to initiate a connection.
1797 			 */
1798 			if (new_masterid == domid) {
1799 				m_cpuid = dp->dcpu;
1800 			} else {
1801 				IDNNEG_DSET_GET(dset, new_masterid, m_cpuid,
1802 						dmask);
1803 				if (m_cpuid == -1) {
1804 					/*
1805 					 * Something is bogus if remote domain
1806 					 * is reporting a valid masterid, but
1807 					 * doesn't have the cpuid for it.
1808 					 */
1809 					cmn_err(CE_WARN,
1810 						"IDN: 209: remote domain (ID "
1811 						"%d, CPU %d) reporting master "
1812 						"(ID %d) without CPU ID",
1813 						domid, dp->dcpu, new_masterid);
1814 					DOMAINSET_ADD(idn.domset.ds_hitlist,
1815 						domid);
1816 					IDN_GUNLOCK();
1817 					return (-1);
1818 				}
1819 			}
1820 		}
1821 
1822 		for (d = 0; d < MAX_DOMAINS; d++) {
1823 			if ((d == idn.localid) || (d == domid))
1824 				continue;
1825 			IDNNEG_DSET_GET(dset, d, cpuid, dmask);
1826 			if (cpuid != -1) {
1827 				DOMAINSET_ADD(con_set, d);
1828 			}
1829 		}
1830 
1831 #ifdef DEBUG
1832 		if (idn.domset.ds_hitlist) {
1833 			PR_HITLIST("%s:%d: con_set %x -> %x (hitlist = %x)\n",
1834 				proc, domid, con_set,
1835 				con_set & ~idn.domset.ds_hitlist,
1836 				idn.domset.ds_hitlist);
1837 		}
1838 #endif /* DEBUG */
1839 
1840 		con_set &= ~idn.domset.ds_hitlist;
1841 
1842 		ASSERT(!DOMAIN_IN_SET(con_set, idn.localid));
1843 		ASSERT(!DOMAIN_IN_SET(con_set, domid));
1844 
1845 		if ((new_masterid != IDN_NIL_DOMID) &&
1846 				DOMAIN_IN_SET(idn.domset.ds_hitlist,
1847 						new_masterid)) {
1848 			PR_HITLIST("%s:%d: new_mstr %d -> -1 (hitlist = %x)\n",
1849 				proc, domid, new_masterid,
1850 				idn.domset.ds_hitlist);
1851 			IDN_GUNLOCK();
1852 			return (1);
1853 		}
1854 
1855 		if (idn_select_master(domid, new_masterid, m_cpuid) < 0) {
1856 			/*
1857 			 * Returns w/GLOCK dropped if error.
1858 			 */
1859 			return (1);
1860 		}
1861 
1862 		masterid = IDN_GET_MASTERID();
1863 		ASSERT(masterid != IDN_NIL_DOMID);
1864 
1865 		if (idn.state == IDNGS_CONNECT) {
1866 			/*
1867 			 * This is the initial connection for
1868 			 * the local domain.
1869 			 */
1870 			IDN_DLOCK_EXCL(idn.localid);
1871 
1872 			if (masterid == idn.localid) {
1873 				if (idn_master_init() < 0) {
1874 					cmn_err(CE_WARN,
1875 						"IDN: 210: failed to init "
1876 						"MASTER context");
1877 					ldp->dvote.v.master = 0;
1878 					IDN_DUNLOCK(idn.localid);
1879 					IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
1880 					IDN_SET_MASTERID(IDN_NIL_DOMID);
1881 					IDN_GUNLOCK();
1882 					return (-1);
1883 				}
1884 				DSLAB_LOCK_EXCL(idn.localid);
1885 				ldp->dslab_state = DSLAB_STATE_LOCAL;
1886 				DSLAB_UNLOCK(idn.localid);
1887 				ldp->dvote.v.connected = 1;
1888 			} else {
1889 				/*
1890 				 * Either the remote domain is the
1891 				 * master or it's a new slave trying
1892 				 * to connect to us.  We can't allow
1893 				 * further progress until we've
1894 				 * sync'd up with the master.
1895 				 */
1896 				if (masterid != domid) {
1897 					IDN_DUNLOCK(idn.localid);
1898 					IDN_GUNLOCK();
1899 					return (1);
1900 				}
1901 				DSLAB_LOCK_EXCL(idn.localid);
1902 				ldp->dslab_state = DSLAB_STATE_REMOTE;
1903 				DSLAB_UNLOCK(idn.localid);
1904 			}
1905 			IDN_DUNLOCK(idn.localid);
1906 			/*
1907 			 * We've sync'd up with the new master.
1908 			 */
1909 			IDN_GSTATE_TRANSITION(IDNGS_ONLINE);
1910 		}
1911 
1912 		mdp = &idn_domain[masterid];
1913 
1914 		if ((masterid != domid) && !IDNDS_CONFIG_DONE(mdp)) {
1915 			/*
1916 			 * We can't progress any further with
1917 			 * other domains until we've exchanged all
1918 			 * the necessary CFG info with the master,
1919 			 * i.e. until we have a mailbox area from
1920 			 * which we can allocate mailboxes to
1921 			 * other domains.
1922 			 */
1923 			PR_PROTO("%s:%d: still exchanging CFG "
1924 				"w/master(%d)\n",
1925 				proc, domid, masterid);
1926 			IDN_GUNLOCK();
1927 			return (1);
1928 		}
1929 
1930 		DSLAB_LOCK_EXCL(domid);
1931 		dp->dslab_state = ldp->dslab_state;
1932 		DSLAB_UNLOCK(domid);
1933 		if (idn.state != IDNGS_ONLINE) {
1934 			IDN_GSTATE_TRANSITION(IDNGS_ONLINE);
1935 		}
1936 	}
1937 
1938 	IDN_GUNLOCK();
1939 
1940 	pending_set = con_set;
1941 	pending_set &= ~(idn.domset.ds_trans_on | idn.domset.ds_connected);
1942 	idn.domset.ds_trans_on |= pending_set;
1943 
1944 	con_set |= idn.domset.ds_trans_on | idn.domset.ds_connected;
1945 	con_set &= ~idn.domset.ds_trans_off;
1946 	DOMAINSET_ADD(con_set, idn.localid);
1947 
1948 	if (dp->dsync.s_cmd != IDNSYNC_CONNECT) {
1949 		idn_sync_exit(domid, IDNSYNC_DISCONNECT);
1950 		idn_sync_enter(domid, IDNSYNC_CONNECT,
1951 				con_set, DOMAINSET(idn.localid),
1952 				idn_xstate_transfunc,
1953 				(void *)IDNP_CON);
1954 	}
1955 
1956 	/*
1957 	 * Get this domain registered as an expected domain on
1958 	 * the remaining domains in the CONNECT synchronization.
1959 	 */
1960 	(void) idn_sync_register(domid, IDNSYNC_CONNECT, 0, IDNSYNC_REG_NEW);
1961 
1962 	/*
1963 	 * Note that if (msg == 0) then there will be
1964 	 * no dset and pending_set will be 0.
1965 	 * So, the following loop will never attempt to
1966 	 * look at the dset unless (msg != 0), implying
1967 	 * that we've been through the initial code above
1968 	 * and have initialized dmask.
1969 	 */
1970 	ASSERT(pending_set ? (dmask != (uint_t)-1) : 1);
1971 
1972 	for (d = 0; d < MAX_DOMAINS; d++) {
1973 		int	rv;
1974 
1975 		if (!DOMAIN_IN_SET(pending_set, d))
1976 			continue;
1977 
1978 		ASSERT((d != idn.localid) && (d != domid));
1979 
1980 		dp = &idn_domain[d];
1981 
1982 		IDNNEG_DSET_GET(dset, d, cpuid, dmask);
1983 		if (cpuid == -1) {
1984 			PR_PROTO("%s:%d: failed to get cpuid from dset "
1985 				"for domain %d (pset = 0x%x)\n",
1986 				proc, domid, d, pending_set);
1987 			DOMAINSET_DEL(idn.domset.ds_trans_on, d);
1988 			continue;
1989 		}
1990 
1991 		IDN_DLOCK_EXCL(d);
1992 		if ((rv = idn_open_domain(d, cpuid, 0)) != 0) {
1993 			PR_PROTO("%s:%d: failed "
1994 				"idn_open_domain(%d,%d,0) (rv = %d)\n",
1995 				proc, domid, d, cpuid, rv);
1996 			if (rv < 0) {
1997 				cmn_err(CE_WARN,
1998 					"IDN: 205: (%s) failed to "
1999 					"open-domain(%d,%d)",
2000 					proc, d, cpuid);
2001 				DOMAINSET_DEL(idn.domset.ds_trans_on, d);
2002 			} else if (DOMAIN_IN_SET(idn.domset.ds_trans_off, d)) {
2003 				/*
2004 				 * We've requested to connect to a domain
2005 				 * from which we're disconnecting.  We'd
2006 				 * better mark this guy for relinking.
2007 				 */
2008 				DOMAINSET_ADD(idn.domset.ds_relink, d);
2009 				IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
2010 						idn.domset.ds_relink);
2011 			}
2012 			IDN_DUNLOCK(d);
2013 			continue;
2014 		}
2015 
2016 		idn_connect(d);
2017 
2018 		IDN_DUNLOCK(d);
2019 	}
2020 
2021 	return (0);
2022 }
2023 
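/*
 * Transition action for the NEGO/PEND state.  Send a NEGO to the
 * domain advertising the set of domains we're connected to or
 * connecting with; if we're responding to an incoming NEGO, send
 * it as a NEGO+ACK using the sender's cookie.
 */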
2024 /*ARGSUSED*/
2025 static void
2026 idn_action_nego_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2027 {
2028 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2029 	idn_msgtype_t	mt;
2030 	domainset_t	con_set;
2031 
2032 	ASSERT(IDN_SYNC_IS_LOCKED());
2033 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2034 
2035 	con_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
2036 	con_set &= ~idn.domset.ds_trans_off;
2037 
2038 	if (!msg) {
2039 		idn_send_nego(domid, NULL, con_set);
2040 	} else {
2041 		mt.mt_mtype = IDNP_NEGO | IDNP_ACK;
2042 		mt.mt_atype = 0;
2043 		mt.mt_cookie = mtp->mt_cookie;
2044 		idn_send_nego(domid, &mt, con_set);
2045 	}
2046 }
2047 
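/*
 * Error handler for the NEGO phase.  If we were nack'd, back out a
 * possibly half-chosen master via idn_nego_cleanup_check().  If a
 * real message triggered the error, nack it back with a retry hint
 * that includes our best known (new) master.  Finally, either
 * schedule a NEGO retry or, on an explicit EXIT nack, drop the
 * domain from the relink set and disconnect it.
 */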
2048 /*ARGSUSED*/
2049 static void
2050 idn_error_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2051 {
2052 	int	new_masterid, new_cpuid;
2053 	int	retry = 1;
2054 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2055 	uint_t	token;
2056 
2057 	ASSERT(IDN_SYNC_IS_LOCKED());
2058 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2059 
2060 	if (msg & IDNP_NACK) {
2061 		idn_nack_t	nack;
2062 
2063 		nack = GET_XARGS_NACK_TYPE(xargs);
2064 		switch (nack) {
2065 		case IDNNACK_RETRY:
2066 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2067 			new_cpuid    = (int)GET_XARGS_NACK_ARG2(xargs);
2068 			break;
2069 
2070 		case IDNNACK_EXIT:
2071 			retry = 0;
2072 			/*FALLTHROUGH*/
2073 
2074 		default:
2075 			new_masterid = IDN_NIL_DOMID;
2076 			new_cpuid    = IDN_NIL_DCPU;
2077 			break;
2078 		}
2079 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2080 	}
2081 
2082 	if (msg & IDNP_MSGTYPE_MASK) {
2083 		idn_msgtype_t	mt;
2084 		idn_xdcargs_t	nargs;
2085 
2086 		mt.mt_mtype = IDNP_NACK;
2087 		mt.mt_atype = msg;
2088 		mt.mt_cookie = mtp->mt_cookie;
2089 		CLR_XARGS(nargs);
2090 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
2091 		IDN_GLOCK_SHARED();
2092 		new_masterid = IDN_GET_NEW_MASTERID();
2093 		if (new_masterid == IDN_NIL_DOMID)
2094 			new_masterid = IDN_GET_MASTERID();
2095 		if (new_masterid != IDN_NIL_DOMID)
2096 			new_cpuid = idn_domain[new_masterid].dcpu;
2097 		else
2098 			new_cpuid = IDN_NIL_DCPU;
2099 		SET_XARGS_NACK_ARG1(nargs, new_masterid);
2100 		SET_XARGS_NACK_ARG2(nargs, new_cpuid);
2101 		IDN_GUNLOCK();
2102 		idn_send_acknack(domid, &mt, nargs);
2103 	}
2104 
2105 	if (retry) {
2106 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2107 		idn_retry_submit(idn_retry_nego, NULL, token,
2108 				idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2109 	} else {
2110 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2111 		IDN_RESET_COOKIES(domid);
2112 		idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
2113 				IDNDS_SYNC_TYPE(&idn_domain[domid]));
2114 	}
2115 }
2116 
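/*
 * Transition action once our NEGO has been sent.  An incoming NEGO
 * is answered with a NEGO+ACK; a NEGO+ACK is answered with an ACK
 * carrying our vote ticket and a dset of the domains (and cpuids)
 * we're trying to bring into the IDN; a NACK either schedules a
 * NEGO retry or abandons the negotiation and disconnects the domain.
 */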
2117 /*ARGSUSED*/
2118 static void
2119 idn_action_nego_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2120 {
2121 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2122 	domainset_t	conset;
2123 	idn_msgtype_t	mt;
2124 
2125 	ASSERT(IDN_SYNC_IS_LOCKED());
2126 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2127 
2128 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2129 
2130 	conset = idn.domset.ds_trans_on | idn.domset.ds_connected;
2131 	conset &= ~idn.domset.ds_trans_off;
2132 
2133 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
2134 		/*
2135 		 * nego
2136 		 */
2137 		mt.mt_mtype = IDNP_NEGO | IDNP_ACK;
2138 		mt.mt_atype = 0;
2139 		idn_send_nego(domid, &mt, conset);
2140 	} else if (msg & IDNP_MSGTYPE_MASK) {
2141 		int		d;
2142 		idn_xdcargs_t	nargs;
2143 		idnneg_dset_t	dset;
2144 		uint_t		dmask;
2145 		idn_vote_t	vote;
2146 
2147 		mt.mt_mtype = IDNP_ACK;
2148 		mt.mt_atype = msg;
2149 		DOMAINSET_DEL(conset, idn.localid);
2150 		DOMAINSET_DEL(conset, domid);
2151 
2152 		dmask = IDNNEG_DSET_MYMASK();
2153 		IDNNEG_DSET_INIT(dset, dmask);
2154 		for (d = 0; d < MAX_DOMAINS; d++) {
2155 			int	cpuid;
2156 
2157 			if (!DOMAIN_IN_SET(conset, d))
2158 				continue;
2159 
2160 			if ((cpuid = idn_domain[d].dcpu) == IDN_NIL_DCPU)
2161 				continue;
2162 
2163 			IDNNEG_DSET_SET(dset, d, cpuid, dmask);
2164 		}
2165 		IDNNEG_DSET_SET_MASTER(dset, domid, IDN_GET_MASTERID());
2166 		ASSERT((IDN_GET_MASTERID() != IDN_NIL_DOMID) ?
2167 			(idn_domain[IDN_GET_MASTERID()].dcpu != IDN_NIL_DCPU) :
2168 			1);
2169 		vote.ticket = idn_domain[idn.localid].dvote.ticket;
2170 		vote.v.master = 0;
2171 		CLR_XARGS(nargs);
2172 		SET_XARGS_NEGO_TICKET(nargs, vote.ticket);
2173 		SET_XARGS_NEGO_DSET(nargs, dset);
2174 		/*
2175 		 * nego+ack
2176 		 */
2177 		idn_send_acknack(domid, &mt, nargs);
2178 	} else {
2179 		uint_t		token;
2180 		int		new_masterid, new_cpuid;
2181 		int		retry = 1;
2182 		idn_nack_t	nack;
2183 		/*
2184 		 * nack - retry
2185 		 *
2186 		 * It's possible, if we've made it this far, that
2187 		 * we've already chosen a master and this
2188 		 * dude might be it!  If it is, we need to clean up.
2189 		 */
2190 		nack = GET_XARGS_NACK_TYPE(xargs);
2191 		switch (nack) {
2192 		case IDNNACK_RETRY:
2193 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2194 			new_cpuid = (int)GET_XARGS_NACK_ARG2(xargs);
2195 			break;
2196 
2197 		case IDNNACK_EXIT:
2198 			retry = 0;
2199 			/*FALLTHROUGH*/
2200 
2201 		default:
2202 			new_masterid = IDN_NIL_DOMID;
2203 			new_cpuid = IDN_NIL_DCPU;
2204 			break;
2205 		}
2206 
2207 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2208 
2209 		if (retry) {
2210 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2211 			idn_retry_submit(idn_retry_nego, NULL, token,
2212 					idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2213 		} else {
2214 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
2215 			IDN_RESET_COOKIES(domid);
2216 			idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
2217 					IDNDS_SYNC_TYPE(&idn_domain[domid]));
2218 		}
2219 	}
2220 }
2221 
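/*
 * Transition action after a NEGO has been received.  Only a NACK
 * needs handling here: clean up any partially chosen master and
 * then either schedule a NEGO retry or abandon the negotiation
 * and disconnect the domain.
 */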
2222 /*ARGSUSED*/
2223 static void
2224 idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2225 {
2226 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2227 
2228 	ASSERT(IDN_SYNC_IS_LOCKED());
2229 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2230 
2231 	if (msg & IDNP_NACK) {
2232 		uint_t		token;
2233 		int		new_masterid, new_cpuid;
2234 		int		retry = 1;
2235 		idn_nack_t	nack;
2236 		/*
2237 		 * nack - retry.
2238 		 *
2239 		 * At this stage of receiving a nack we need to
2240 		 * check whether we need to start over again with
2241 		 * selecting a new master.
2242 		 */
2243 		nack = GET_XARGS_NACK_TYPE(xargs);
2244 		switch (nack) {
2245 		case IDNNACK_RETRY:
2246 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2247 			new_cpuid = (int)GET_XARGS_NACK_ARG2(xargs);
2248 			break;
2249 
2250 		case IDNNACK_EXIT:
2251 			retry = 0;
2252 			/*FALLTHROUGH*/
2253 
2254 		default:
2255 			new_masterid = IDN_NIL_DOMID;
2256 			new_cpuid = IDN_NIL_DCPU;
2257 			break;
2258 		}
2259 
2260 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2261 
2262 		if (retry) {
2263 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2264 			idn_retry_submit(idn_retry_nego, NULL, token,
2265 					idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2266 		} else {
2267 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
2268 			IDN_RESET_COOKIES(domid);
2269 			idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
2270 					IDNDS_SYNC_TYPE(&idn_domain[domid]));
2271 		}
2272 	}
2273 }
2274 
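/*
 * Final step of the NEGO phase.  Kill any outstanding NEGO retry,
 * clear the domain's transition handler and kick off phase 1 of
 * the CONFIG exchange.
 */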
2275 static void
2276 idn_final_nego(int domid)
2277 {
2278 	idn_domain_t	*dp = &idn_domain[domid];
2279 
2280 	ASSERT(IDN_SYNC_IS_LOCKED());
2281 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2282 
2283 	(void) idn_retry_terminate(IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO));
2284 
2285 	ASSERT(dp->dstate == IDNDS_CONFIG);
2286 
2287 	dp->dxp = NULL;
2288 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
2289 
2290 	idn_send_config(domid, 1);
2291 }
2292 
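/*
 * Abort path out of the NEGO phase.  Terminate any NEGO retries,
 * back out a possibly half-chosen master via idn_nego_cleanup_check(),
 * mark the domain for relink unless we're disconnecting or he's on
 * the hitlist, reset the exchange cookies and disconnect.
 */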
2295 /*ARGSUSED1*/
2296 static void
2297 idn_exit_nego(int domid, uint_t msgtype)
2298 {
2299 	idn_domain_t	*dp;
2300 	idn_fin_t	fintype;
2301 
2302 	ASSERT(IDN_SYNC_IS_LOCKED());
2303 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2304 
2305 	dp = &idn_domain[domid];
2306 
2307 	fintype = msgtype ? IDNFIN_NORMAL : IDNFIN_FORCE_HARD;
2308 
2309 	(void) idn_retry_terminate(IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO));
2310 
2311 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_connected, domid));
2312 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_ready_on, domid));
2313 	ASSERT(dp->dxp == &xphase_nego);
2314 
2315 	idn_nego_cleanup_check(domid, IDN_NIL_DOMID, IDN_NIL_DCPU);
2316 
2317 	IDN_GLOCK_SHARED();
2318 	if ((idn.state != IDNGS_DISCONNECT) &&
2319 			!DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
2320 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
2321 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
2322 				idn.domset.ds_relink);
2323 	} else {
2324 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), NULL);
2325 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2326 	}
2327 	IDN_GUNLOCK();
2328 	/*
2329 	 * Reset send cookie to 0 so that receiver does not validate
2330 	 * cookie.  This is necessary since at this early stage it's
2331 	 * possible we may not have exchanged appropriate cookies.
2332 	 */
2333 	IDN_RESET_COOKIES(domid);
2334 	idn_disconnect(domid, fintype, IDNFIN_ARG_NONE,
2335 			IDNDS_SYNC_TYPE(dp));
2336 }
2337 
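/*
 * Called when negotiation with the given domain has failed or been
 * nack'd.  If that domain was central to the current connect attempt
 * (it was the only domain in transition, or it had been chosen as
 * master) then back out the master selection and either fall back to
 * the CONNECT state or force a RECONFIG, scheduling a fresh NEGO
 * against a new master candidate or relink domain where possible.
 */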
2338 static void
2339 idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
2340 {
2341 	idn_domain_t	*ldp, *dp;
2342 	procname_t	proc = "idn_nego_cleanup_check";
2343 
2344 	ASSERT(IDN_SYNC_IS_LOCKED());
2345 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2346 
2347 	dp = &idn_domain[domid];
2348 	ldp = &idn_domain[idn.localid];
2349 
2350 	IDN_GLOCK_EXCL();
2351 
2352 	if (((idn.state == IDNGS_ONLINE) && !idn.domset.ds_connected) ||
2353 			(idn.state == IDNGS_CONNECT)) {
2354 		domainset_t	trans_on;
2355 		int		masterid;
2356 		int		retry_domid = IDN_NIL_DOMID;
2357 		int		rv;
2358 
2359 		IDN_DLOCK_EXCL(idn.localid);
2360 		masterid = (idn.state == IDNGS_ONLINE) ?
2361 				IDN_GET_MASTERID() : IDN_GET_NEW_MASTERID();
2362 		trans_on = idn.domset.ds_trans_on;
2363 		DOMAINSET_DEL(trans_on, domid);
2364 		if (trans_on == 0) {
2365 			int		d;
2366 			domainset_t	relink = idn.domset.ds_relink;
2367 			/*
2368 			 * This was the only guy we were trying
2369 			 * to connect with.
2370 			 */
2371 			ASSERT((idn.state == IDNGS_ONLINE) ?
2372 				((idn.localid == masterid) ||
2373 					(domid == masterid)) : 1);
2374 			if (idn.localid == masterid)
2375 				idn_master_deinit();
2376 			ldp->dvote.v.connected = 0;
2377 			ldp->dvote.v.master = 0;
2378 			dp->dvote.v.master = 0;
2379 			IDN_SET_MASTERID(IDN_NIL_DOMID);
2380 			IDN_SET_NEW_MASTERID(new_masterid);
2381 			IDN_GSTATE_TRANSITION(IDNGS_CONNECT);
2382 			IDN_PREP_HWINIT();
2383 			IDN_DUNLOCK(idn.localid);
2384 			IDN_GUNLOCK();
2385 			/*
2386 			 * If there's a new master available then
2387 			 * just try to relink with him unless
2388 			 * it's ourselves.
2389 			 */
2390 			if ((new_masterid != IDN_NIL_DOMID) &&
2391 					(new_masterid != idn.localid) &&
2392 					(new_masterid != domid)) {
2393 				IDN_DLOCK_EXCL(new_masterid);
2394 				rv = idn_open_domain(new_masterid,
2395 							new_cpuid, 0);
2396 				if (rv < 0) {
2397 					cmn_err(CE_WARN,
2398 						"IDN: 205: (%s) failed to "
2399 						"open-domain(%d,%d)",
2400 						proc, new_masterid, new_cpuid);
2401 					IDN_GLOCK_EXCL();
2402 					IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
2403 					IDN_GUNLOCK();
2404 				} else {
2405 					relink = DOMAINSET(new_masterid);
2406 				}
2407 				IDN_DUNLOCK(new_masterid);
2408 			}
2409 			DOMAINSET_DEL(relink, domid);
2410 			if (relink)
2411 				for (d = 0; d < MAX_DOMAINS; d++) {
2412 					if (!DOMAIN_IN_SET(relink, d))
2413 						continue;
2414 					retry_domid = d;
2415 					break;
2416 				}
2417 		} else if (domid == masterid) {
2418 			/*
2419 			 * There are other domains we were trying
2420 			 * to connect to.  As long as the chosen
2421 			 * master was somebody other than this
2422 			 * domain that nack'd us, life is cool, but
2423 			 * if it was this remote domain we'll need
2424 			 * to start over.
2425 			 */
2426 			IDN_DUNLOCK(idn.localid);
2427 			dp->dvote.v.master = 0;
2428 			IDN_SET_MASTERID(IDN_NIL_DOMID);
2429 			IDN_SET_NEW_MASTERID(new_masterid);
2430 
2431 			if (idn.state == IDNGS_ONLINE) {
2432 				IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
2433 							gk_reconfig_last);
2434 				IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
2435 				IDN_GUNLOCK();
2436 				idn_unlink_domainset(trans_on, IDNFIN_NORMAL,
2437 							IDNFIN_ARG_NONE,
2438 							IDNFIN_OPT_RELINK,
2439 							BOARDSET_ALL);
2440 			} else if ((new_masterid != IDN_NIL_DOMID) &&
2441 					(new_masterid != idn.localid) &&
2442 					(new_masterid != domid) &&
2443 					!DOMAIN_IN_SET(trans_on,
2444 							new_masterid)) {
2445 				IDN_GUNLOCK();
2446 				IDN_DLOCK_EXCL(new_masterid);
2447 				rv = idn_open_domain(new_masterid,
2448 							new_cpuid, 0);
2449 				IDN_GLOCK_EXCL();
2450 				IDN_DUNLOCK(new_masterid);
2451 				if (rv < 0) {
2452 					cmn_err(CE_WARN,
2453 						"IDN: 205: (%s) failed to "
2454 						"open-domain(%d,%d)",
2455 						proc, new_masterid,
2456 						new_cpuid);
2457 					IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
2458 					new_masterid = IDN_NIL_DOMID;
2459 				} else {
2460 					retry_domid = new_masterid;
2461 				}
2462 				IDN_GUNLOCK();
2463 			} else {
2464 				IDN_GUNLOCK();
2465 			}
2466 		} else {
2467 			IDN_DUNLOCK(idn.localid);
2468 			IDN_GUNLOCK();
2469 		}
2470 		if (retry_domid != IDN_NIL_DOMID) {
2471 			uint_t		token;
2472 			idn_domain_t	*rdp = &idn_domain[retry_domid];
2473 
2474 			IDN_DLOCK_EXCL(retry_domid);
2475 			rdp->dxp = &xphase_nego;
2476 			IDN_XSTATE_TRANSITION(rdp, IDNXS_PEND);
2477 			IDN_DUNLOCK(retry_domid);
2478 			token = IDN_RETRY_TOKEN(retry_domid, IDNRETRY_NEGO);
2479 			idn_retry_submit(idn_retry_nego, NULL, token,
2480 					idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2481 		}
2482 	} else {
2483 		IDN_GUNLOCK();
2484 	}
2485 }
2486 
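/*
 * Send a CON of the given type to the domain (as CON+ACK/NACK if
 * responding to mtp), advertising conset as the set of domains we
 * consider ready.  A message timer is started to cover the expected
 * response.
 */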
2487 static int
2488 idn_send_con(int domid, idn_msgtype_t *mtp,
2489 		idn_con_t contype, domainset_t conset)
2490 {
2491 	idn_msgtype_t	mt;
2492 	uint_t		acknack;
2493 	procname_t	proc = "idn_send_con";
2494 
2495 	ASSERT(IDN_SYNC_IS_LOCKED());
2496 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2497 
2498 	if (mtp) {
2499 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
2500 		mt.mt_mtype = mtp->mt_mtype;
2501 		mt.mt_atype = mtp->mt_atype;
2502 		mt.mt_cookie = mtp->mt_cookie;
2503 	} else {
2504 		acknack = 0;
2505 		mt.mt_mtype = IDNP_CON;
2506 		mt.mt_atype = 0;
2507 		/*
2508 		 * For simple CON queries we want a unique
2509 		 * timer assigned.  For others, they
2510 		 * effectively share one.
2511 		 */
2512 		if (contype == IDNCON_QUERY)
2513 			mt.mt_cookie = 0;
2514 		else
2515 			mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
2516 	}
2517 
2518 	ASSERT((contype == IDNCON_QUERY) ? idn_domain[domid].dcookie_send : 1);
2519 
2520 	PR_PROTO("%s:%d: sending con%sto (cpu %d) [ct=%s, cs=0x%x]\n",
2521 		proc, domid,
2522 		(acknack & IDNP_ACK) ? "+ack " :
2523 		(acknack & IDNP_NACK) ? "+nack " : " ",
2524 		idn_domain[domid].dcpu,
2525 		idncon_str[contype], conset);
2526 
2527 	IDN_MSGTIMER_START(domid, IDNP_CON, (ushort_t)contype,
2528 			idn_msg_waittime[IDNP_CON], &mt.mt_cookie);
2529 
2530 	IDNXDC(domid, &mt, (uint_t)contype, (uint_t)conset, 0, 0);
2531 
2532 	return (0);
2533 }
2534 
2535 /*
2536  * Must leave w/DLOCK dropped and SYNC_LOCK held.
2537  */
2538 static int
2539 idn_recv_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2540 {
2541 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2542 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
2543 	idn_con_t	contype;
2544 	domainset_t	my_ready_set, ready_set;
2545 	idn_msgtype_t	mt;
2546 	idn_domain_t	*dp = &idn_domain[domid];
2547 	idn_xdcargs_t	aargs;
2548 	procname_t	proc = "idn_recv_con";
2549 
2550 	ASSERT(IDN_SYNC_IS_LOCKED());
2551 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2552 
2553 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2554 
2555 	contype   = GET_XARGS_CON_TYPE(xargs);
2556 	ready_set = GET_XARGS_CON_DOMSET(xargs);
2557 
2558 	CLR_XARGS(aargs);
2559 
2560 	if (!(msg & IDNP_NACK) && (contype == IDNCON_QUERY)) {
2561 		domainset_t	query_set;
2562 
2563 		query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
2564 						ready_set, IDNSYNC_REG_REG);
2565 
2566 		my_ready_set = idn.domset.ds_connected |
2567 				idn.domset.ds_ready_on;
2568 		my_ready_set &= ~idn.domset.ds_trans_off;
2569 		DOMAINSET_ADD(my_ready_set, idn.localid);
2570 
2571 		if (msg & IDNP_MSGTYPE_MASK) {
2572 			mt.mt_mtype = IDNP_ACK;
2573 			mt.mt_atype = IDNP_CON;
2574 			SET_XARGS_CON_TYPE(aargs, contype);
2575 			SET_XARGS_CON_DOMSET(aargs, my_ready_set);
2576 			idn_send_acknack(domid, &mt, aargs);
2577 		}
2578 
2579 		if (query_set) {
2580 			uint_t	token;
2581 
2582 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
2583 			idn_retry_submit(idn_retry_query, NULL, token,
2584 					idn_msg_retrytime[(int)IDNRETRY_CONQ]);
2585 		}
2586 
2587 		return (0);
2588 	}
2589 
2590 	if (dp->dxp == NULL) {
2591 		STRING(mstr);
2592 		STRING(lstr);
2593 		/*
2594 		 * Must have received an inappropriate error
2595 		 * message as we should already be registered
2596 		 * by the time we reach here.
2597 		 */
2598 		INUM2STR(msg, mstr);
2599 		INUM2STR(msgarg, lstr);
2600 
2601 		PR_PROTO("%s:%d: ERROR: NOT YET REGISTERED (%s/%s)\n",
2602 			proc, domid, mstr, lstr);
2603 
2604 		if (msg & IDNP_MSGTYPE_MASK) {
2605 			mt.mt_mtype = IDNP_NACK;
2606 			mt.mt_atype = msg;
2607 			SET_XARGS_NACK_TYPE(aargs, IDNNACK_RETRY);
2608 			idn_send_acknack(domid, &mt, aargs);
2609 		}
2610 
2611 		return (-1);
2612 	}
2613 
2614 	idn_xphase_transition(domid, mtp, xargs);
2615 
2616 	return (0);
2617 }
2618 
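/*
 * Retry handler for the CON sequence.  Bail out if the domain is no
 * longer in the CON exchange phase or is not sitting in the expected
 * (CONNECT/PEND) state; otherwise re-drive the CON state machine.
 */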
2619 /*ARGSUSED1*/
2620 static void
2621 idn_retry_con(uint_t token, void *arg)
2622 {
2623 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
2624 	idn_domain_t	*dp = &idn_domain[domid];
2625 	idn_xdcargs_t	xargs;
2626 	procname_t	proc = "idn_retry_con";
2627 
2628 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_CON);
2629 
2630 	IDN_SYNC_LOCK();
2631 	IDN_DLOCK_EXCL(domid);
2632 
2633 	if (dp->dxp != &xphase_con) {
2634 		STRING(str);
2635 
2636 #ifdef DEBUG
2637 		if (dp->dxp) {
2638 			INUM2STR(dp->dxp->xt_msgtype, str);
2639 		}
2640 #endif /* DEBUG */
2641 
2642 		PR_PROTO("%s:%d: dxp(%s) != CON...bailing...\n",
2643 			proc, domid, dp->dxp ? str : "NULL");
2644 		IDN_DUNLOCK(domid);
2645 		IDN_SYNC_UNLOCK();
2646 		return;
2647 	}
2648 
2649 	if ((dp->dsync.s_cmd != IDNSYNC_CONNECT) ||
2650 			(dp->dxstate != IDNXS_PEND)) {
2651 		PR_PROTO("%s:%d: cmd (%s) and/or xstate (%s) not "
2652 			"expected (%s/%s)\n",
2653 			proc, domid, idnsync_str[dp->dsync.s_cmd],
2654 			idnxs_str[dp->dxstate], idnsync_str[IDNSYNC_CONNECT],
2655 			idnxs_str[IDNXS_PEND]);
2656 		IDN_DUNLOCK(domid);
2657 		IDN_SYNC_UNLOCK();
2658 		return;
2659 	}
2660 
2661 	idn_xphase_transition(domid, NULL, xargs);
2662 
2663 	IDN_DUNLOCK(domid);
2664 	IDN_SYNC_UNLOCK();
2665 }
2666 
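/*
 * Check routine for the CON phase.  Register the remote domain's
 * ready-set with the CONNECT synchronization, mark the domain
 * connected once all expected domains have checked in, and send
 * CON queries to any domains we still need to hear from.
 */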
2667 static int
2668 idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2669 {
2670 	int		ready;
2671 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2672 	idn_domain_t	*dp = &idn_domain[domid];
2673 	domainset_t	ready_set, my_ready_set, query_set;
2674 
2675 	ASSERT(IDN_SYNC_IS_LOCKED());
2676 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2677 
2678 	if (msg & IDNP_NACK)
2679 		return (0);
2680 
2681 	if ((dp->dstate == IDNDS_CON_PEND) &&
2682 			(msg & IDNP_MSGTYPE_MASK) &&
2683 			(msg & IDNP_ACK))		/* con+ack */
2684 		return (1);
2685 
2686 	if (msg == 0) {
2687 		ready_set = idn.domset.ds_connected &
2688 				~idn.domset.ds_trans_off;
2689 	} else {
2690 		ready_set = GET_XARGS_CON_DOMSET(xargs);
2691 		DOMAINSET_ADD(idn.domset.ds_ready_on, domid);
2692 	}
2693 
2694 	DOMAINSET_ADD(ready_set, idn.localid);
2695 
2696 	query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
2697 					ready_set, IDNSYNC_REG_REG);
2698 	/*
2699 	 * No need to query this domain as he's already
2700 	 * in the CON sequence.
2701 	 */
2702 	DOMAINSET_DEL(query_set, domid);
2703 
2704 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
2705 	if (ready) {
2706 		DOMAINSET_DEL(idn.domset.ds_ready_on, domid);
2707 		DOMAINSET_ADD(idn.domset.ds_connected, domid);
2708 	}
2709 
2710 	if (query_set) {
2711 		int	d;
2712 
2713 		my_ready_set = idn.domset.ds_ready_on |
2714 				idn.domset.ds_connected;
2715 		my_ready_set &= ~idn.domset.ds_trans_off;
2716 		DOMAINSET_ADD(my_ready_set, idn.localid);
2717 
2718 		for (d = 0; d < MAX_DOMAINS; d++) {
2719 			if (!DOMAIN_IN_SET(query_set, d))
2720 				continue;
2721 
2722 			dp = &idn_domain[d];
2723 
2724 			IDN_DLOCK_EXCL(d);
2725 			if ((dp->dsync.s_cmd == IDNSYNC_CONNECT) ||
2726 					!dp->dcookie_send) {
2727 				IDN_DUNLOCK(d);
2728 				continue;
2729 			}
2730 
2731 			IDN_SYNC_QUERY_UPDATE(domid, d);
2732 
2733 			idn_send_con(d, NULL, IDNCON_QUERY, my_ready_set);
2734 			IDN_DUNLOCK(d);
2735 		}
2736 	}
2737 
2738 	return (!msg ? 0 : (ready ? 0 : 1));
2739 }
2740 
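/*
 * Error handler for the CON phase.  Nack the offending message
 * (if there was one) with a retry hint and schedule a CON retry.
 */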
2741 /*ARGSUSED2*/
2742 static void
2743 idn_error_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2744 {
2745 	uint_t	token;
2746 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2747 
2748 	ASSERT(IDN_SYNC_IS_LOCKED());
2749 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2750 
2751 	if (msg & IDNP_MSGTYPE_MASK) {
2752 		idn_msgtype_t	mt;
2753 		idn_xdcargs_t	nargs;
2754 
2755 		mt.mt_mtype = IDNP_NACK;
2756 		mt.mt_atype = msg;
2757 		mt.mt_cookie = mtp->mt_cookie;
2758 		CLR_XARGS(nargs);
2759 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
2760 		idn_send_acknack(domid, &mt, nargs);
2761 	}
2762 
2763 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2764 	idn_retry_submit(idn_retry_con, NULL, token,
2765 			idn_msg_retrytime[(int)IDNRETRY_CON]);
2766 }
2767 
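/*
 * Transition action for the CON/PEND state.  Send a normal CON
 * carrying our current ready-set; if we're responding to an
 * incoming CON, send it as a CON+ACK.
 */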
2768 /*ARGSUSED*/
2769 static void
2770 idn_action_con_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2771 {
2772 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2773 	idn_domain_t	*dp = &idn_domain[domid];
2774 	idn_msgtype_t	mt;
2775 	domainset_t	my_ready_set;
2776 
2777 	ASSERT(IDN_SYNC_IS_LOCKED());
2778 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2779 
2780 	my_ready_set = dp->dsync.s_set_rdy |
2781 			idn.domset.ds_ready_on | idn.domset.ds_connected;
2782 	my_ready_set &= ~idn.domset.ds_trans_off;
2783 	DOMAINSET_ADD(my_ready_set, idn.localid);
2784 
2785 	if (!msg) {
2786 		idn_send_con(domid, NULL, IDNCON_NORMAL, my_ready_set);
2787 	} else {
2788 		mt.mt_mtype = IDNP_CON | IDNP_ACK;
2789 		mt.mt_atype = 0;
2790 		mt.mt_cookie = mtp->mt_cookie;
2791 		idn_send_con(domid, &mt, IDNCON_NORMAL, my_ready_set);
2792 	}
2793 }
2794 
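/*
 * Transition action once our CON has been sent.  Answer a CON with
 * a CON+ACK, answer a CON+ACK with an ACK echoing our ready-set,
 * and on a NACK schedule a CON retry.
 */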
2795 static void
2796 idn_action_con_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2797 {
2798 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2799 	idn_domain_t	*dp = &idn_domain[domid];
2800 	idn_con_t	contype;
2801 	domainset_t	my_ready_set;
2802 	idn_msgtype_t	mt;
2803 
2804 	ASSERT(IDN_SYNC_IS_LOCKED());
2805 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2806 
2807 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2808 
2809 	my_ready_set = dp->dsync.s_set_rdy |
2810 				idn.domset.ds_ready_on |
2811 				idn.domset.ds_connected;
2812 	my_ready_set &= ~idn.domset.ds_trans_off;
2813 	DOMAINSET_ADD(my_ready_set, idn.localid);
2814 
2815 	contype = GET_XARGS_CON_TYPE(xargs);
2816 
2817 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
2818 		/*
2819 		 * con
2820 		 */
2821 		mt.mt_mtype = IDNP_CON | IDNP_ACK;
2822 		mt.mt_atype = 0;
2823 		idn_send_con(domid, &mt, contype, my_ready_set);
2824 	} else if (msg & IDNP_MSGTYPE_MASK) {
2825 		idn_xdcargs_t	cargs;
2826 
2827 		mt.mt_mtype = IDNP_ACK;
2828 		mt.mt_atype = msg;
2829 		CLR_XARGS(cargs);
2830 		SET_XARGS_CON_TYPE(cargs, contype);
2831 		SET_XARGS_CON_DOMSET(cargs, my_ready_set);
2832 		/*
2833 		 * con+ack
2834 		 */
2835 		idn_send_acknack(domid, &mt, cargs);
2836 	} else {
2837 		uint_t	token;
2838 		/*
2839 		 * nack - retry
2840 		 */
2841 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2842 		idn_retry_submit(idn_retry_con, NULL, token,
2843 				idn_msg_retrytime[(int)IDNRETRY_CON]);
2844 	}
2845 }
2846 
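/*
 * Transition action after a CON has been received.  Nothing to do
 * unless we were nack'd, in which case schedule a CON retry.
 */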
2847 /*ARGSUSED*/
2848 static void
2849 idn_action_con_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2850 {
2851 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2852 
2853 	ASSERT(IDN_SYNC_IS_LOCKED());
2854 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2855 
2856 	if (msg & IDNP_NACK) {
2857 		uint_t	token;
2858 		/*
2859 		 * nack - retry
2860 		 */
2861 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2862 		idn_retry_submit(idn_retry_con, NULL, token,
2863 				idn_msg_retrytime[(int)IDNRETRY_CON]);
2864 	}
2865 }
2866 
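/*
 * Final step of the CON phase - the domain is now connected.  Stop
 * CON retries, exit the CONNECT synchronization, verify the mailbox
 * area (disconnecting if it's corrupted), activate the main mailbox
 * (plus the DLPI layer and channels on the very first link) and
 * schedule idn_link_established() to finish up in the background.
 */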
2867 static void
2868 idn_final_con(int domid)
2869 {
2870 	uint_t		targ;
2871 	uint_t		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2872 	idn_domain_t	*dp = &idn_domain[domid];
2873 	procname_t	proc = "idn_final_con";
2874 
2875 	ASSERT(IDN_SYNC_IS_LOCKED());
2876 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2877 
2878 	(void) idn_retry_terminate(token);
2879 
2880 	dp->dxp = NULL;
2881 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
2882 
2883 	idn_sync_exit(domid, IDNSYNC_CONNECT);
2884 
2885 	CHECKPOINT_OPENED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 1);
2886 
2887 	DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
2888 	DOMAINSET_DEL(idn.domset.ds_relink, domid);
2889 	IDN_FSTATE_TRANSITION(dp, IDNFIN_OFF);
2890 
2891 	PR_PROTO("%s:%d: CONNECTED\n", proc, domid);
2892 
2893 	if (idn.domset.ds_trans_on == 0) {
2894 		if ((idn.domset.ds_trans_off | idn.domset.ds_relink) == 0) {
2895 			PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
2896 				proc, domid, idn.domset.ds_hitlist);
2897 			idn.domset.ds_hitlist = 0;
2898 		}
2899 		PR_PROTO("%s:%d: ALL CONNECTED ************ "
2900 			"(0x%x + 0x%x) = 0x%x\n", proc, domid,
2901 			DOMAINSET(idn.localid), idn.domset.ds_connected,
2902 			DOMAINSET(idn.localid) | idn.domset.ds_connected);
2903 	} else {
2904 		PR_PROTO("%s:%d: >>> ds_trans_on = 0x%x, ds_ready_on = 0x%x\n",
2905 			proc, domid,
2906 			idn.domset.ds_trans_on, idn.domset.ds_ready_on);
2907 	}
2908 
2909 	if (idn_verify_config_mbox(domid)) {
2910 		idnsb_error_t	idnerr;
2911 		/*
2912 		 * Mailbox is not cool. Need to disconnect.
2913 		 */
2914 		INIT_IDNKERR(&idnerr);
2915 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
2916 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_SMR_CORRUPTED);
2917 		SET_IDNKERR_PARAM0(&idnerr, domid);
2918 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
2919 		/*
2920 		 * We cannot disconnect from an individual domain
2921 		 * unless all domains are attempting to disconnect
2922 		 * from him also, especially now that we've touched
2923 		 * the SMR and have potential cache conflicts
2924 		 * with the other domains with respect to this
2925 		 * domain.  A disconnect attempt will effectively
2926 		 * shut down the connection with the respective domain,
2927 		 * which is the effect we really want anyway.
2928 		 */
2929 		idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_SMRBAD,
2930 				IDNFIN_SYNC_YES);
2931 
2932 		return;
2933 	}
2934 
2935 	if (lock_try(&idn.first_swlink)) {
2936 		/*
2937 		 * This is our first connection.  Need to
2938 		 * kick some stuff into gear.
2939 		 */
2940 		idndl_dlpi_init();
2941 		(void) idn_activate_channel(CHANSET_ALL, IDNCHAN_ONLINE);
2942 
2943 		targ = 0xf0;
2944 	} else {
2945 		targ = 0;
2946 	}
2947 
2948 	idn_mainmbox_activate(domid);
2949 
2950 	idn_update_op(IDNOP_CONNECTED, DOMAINSET(domid), NULL);
2951 
2952 	IDN_GKSTAT_GLOBAL_EVENT(gk_links, gk_link_last);
2953 
2954 	membar_stst_ldst();
2955 
2956 	IDN_DSTATE_TRANSITION(dp, IDNDS_CONNECTED);
2957 	/*
2958 	 * Need to kick off initial commands in background.
2959 	 * We do not want to do them within the context of
2960 	 * a protocol server because they may sleep and thus
2961 	 * cause the protocol server to incur a soft-deadlock,
2962 	 * i.e. he's sleeping waiting in the slab-waiting area
2963 	 * for a response that will arrive on his protojob
2964 	 * queue, but which he obviously can't process since
2965 	 * he's not waiting on his protojob queue.
2966 	 */
2967 	targ |= domid & 0x0f;
2968 	(void) timeout(idn_link_established, (void *)(uintptr_t)targ, 50);
2969 
2970 	cmn_err(CE_NOTE,
2971 		"!IDN: 200: link (domain %d, CPU %d) connected",
2972 		dp->domid, dp->dcpu);
2973 }
2974 
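/*
 * Abort path out of the CON phase.  Mark the domain for relink
 * unless the whole IDN is being torn down, then disconnect (a hard
 * disconnect if we got here without a message).
 */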
2975 static void
2976 idn_exit_con(int domid, uint_t msgtype)
2977 {
2978 	idn_domain_t	*dp = &idn_domain[domid];
2979 	idn_fin_t	fintype;
2980 	procname_t	proc = "idn_exit_con";
2981 	STRING(str);
2982 
2983 	ASSERT(IDN_SYNC_IS_LOCKED());
2984 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2985 
2986 	INUM2STR(msgtype, str);
2987 	PR_PROTO("%s:%d: msgtype = 0x%x(%s)\n", proc, domid, msgtype, str);
2988 
2989 	fintype = msgtype ? IDNFIN_NORMAL : IDNFIN_FORCE_HARD;
2990 
2991 	IDN_GLOCK_SHARED();
2992 	if (idn.state != IDNGS_DISCONNECT) {
2993 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
2994 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
2995 				idn.domset.ds_relink);
2996 	} else {
2997 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2998 	}
2999 	IDN_GUNLOCK();
3000 
3001 	idn_disconnect(domid, fintype, IDNFIN_ARG_NONE,
3002 			IDNDS_SYNC_TYPE(dp));
3003 }
3004 
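/*
 * Send a FIN to the domain (as FIN+ACK/NACK if responding to mtp),
 * carrying the fin type and argument, the set of domains we consider
 * ready to disconnect, the relink/unlink option and a hint for the
 * new master.  A message timer is started to cover the expected
 * response.
 */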
3005 static int
3006 idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype,
3007 		idn_finarg_t finarg, idn_finopt_t finopt,
3008 		domainset_t finset, uint_t finmaster)
3009 {
3010 	int		need_timer = 1;
3011 	uint_t		acknack;
3012 	uint_t		fintypearg = 0;
3013 	idn_msgtype_t	mt;
3014 	idn_domain_t	*dp = &idn_domain[domid];
3015 	procname_t	proc = "idn_send_fin";
3016 
3017 	ASSERT(IDN_SYNC_IS_LOCKED());
3018 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3019 
3020 	ASSERT((fintype != IDNFIN_QUERY) ? (finopt != IDNFIN_OPT_NONE) : 1);
3021 
3022 	if (mtp) {
3023 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
3024 		mt.mt_mtype = mtp->mt_mtype;
3025 		mt.mt_atype = mtp->mt_atype;
3026 		mt.mt_cookie = mtp->mt_cookie;
3027 	} else {
3028 		acknack = 0;
3029 		mt.mt_mtype = IDNP_FIN;
3030 		mt.mt_atype = 0;
3031 		/*
3032 		 * For simple FIN queries we want a unique
3033 		 * timer assigned.  For others, they
3034 		 * effectively share one.
3035 		 */
3036 		if (fintype == IDNFIN_QUERY)
3037 			mt.mt_cookie = 0;
3038 		else
3039 			mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
3040 	}
3041 
3042 	PR_PROTO("%s:%d: sending fin%sto (cpu %d) "
3043 		"[ft=%s, fa=%s, fs=0x%x, fo=%s, fm=(%d,%d)]\n",
3044 		proc, domid,
3045 		(acknack & IDNP_ACK) ? "+ack " :
3046 		(acknack & IDNP_NACK) ? "+nack " : " ",
3047 		dp->dcpu, idnfin_str[fintype], idnfinarg_str[finarg],
3048 		(int)finset, idnfinopt_str[finopt],
3049 		FIN_MASTER_DOMID(finmaster), FIN_MASTER_CPUID(finmaster));
3050 
3051 	if (need_timer) {
3052 		IDN_MSGTIMER_START(domid, IDNP_FIN, (ushort_t)fintype,
3053 				idn_msg_waittime[IDNP_FIN], &mt.mt_cookie);
3054 	}
3055 
3056 	SET_FIN_TYPE(fintypearg, fintype);
3057 	SET_FIN_ARG(fintypearg, finarg);
3058 
3059 	IDNXDC(domid, &mt, fintypearg, (uint_t)finset,
3060 		(uint_t)finopt, finmaster);
3061 
3062 	return (0);
3063 }
3064 
3065 /*
3066  * Must leave w/DLOCK dropped and SYNC_LOCK held.
3067  */
3068 static int
3069 idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3070 {
3071 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3072 	idn_fin_t	fintype;
3073 	idn_finarg_t	finarg;
3074 	idn_finopt_t	finopt;
3075 	domainset_t	my_ready_set, ready_set;
3076 	idn_msgtype_t	mt;
3077 	idn_domain_t	*dp = &idn_domain[domid];
3078 	idn_xdcargs_t	aargs;
3079 	procname_t	proc = "idn_recv_fin";
3080 
3081 	ASSERT(IDN_SYNC_IS_LOCKED());
3082 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3083 
3084 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3085 
3086 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3087 	finarg    = GET_XARGS_FIN_ARG(xargs);
3088 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3089 	finopt    = GET_XARGS_FIN_OPT(xargs);
3090 
3091 	CLR_XARGS(aargs);
3092 
3093 	if (msg & IDNP_NACK) {
3094 		PR_PROTO("%s:%d: received NACK (type = %s)\n",
3095 			proc, domid, idnnack_str[xargs[0]]);
3096 	} else {
3097 		PR_PROTO("%s:%d: fintype = %s, finopt = %s, "
3098 			"finarg = %s, ready_set = 0x%x\n",
3099 			proc, domid, idnfin_str[fintype],
3100 			idnfinopt_str[finopt],
3101 			idnfinarg_str[finarg], ready_set);
3102 	}
3103 
3104 	if (!(msg & IDNP_NACK) && (fintype == IDNFIN_QUERY)) {
3105 		domainset_t	query_set;
3106 
3107 		query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
3108 						ready_set, IDNSYNC_REG_REG);
3109 
3110 		my_ready_set = ~idn.domset.ds_connected |
3111 					idn.domset.ds_ready_off;
3112 
3113 		if (msg & IDNP_MSGTYPE_MASK) {
3114 			mt.mt_mtype = IDNP_ACK;
3115 			mt.mt_atype = IDNP_FIN;
3116 			SET_XARGS_FIN_TYPE(aargs, fintype);
3117 			SET_XARGS_FIN_ARG(aargs, finarg);
3118 			SET_XARGS_FIN_DOMSET(aargs, my_ready_set);
3119 			SET_XARGS_FIN_OPT(aargs, IDNFIN_OPT_NONE);
3120 			SET_XARGS_FIN_MASTER(aargs, NIL_FIN_MASTER);
3121 			idn_send_acknack(domid, &mt, aargs);
3122 		}
3123 
3124 		if (query_set) {
3125 			uint_t	token;
3126 
3127 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
3128 			idn_retry_submit(idn_retry_query, NULL, token,
3129 					idn_msg_retrytime[(int)IDNRETRY_FINQ]);
3130 		}
3131 
3132 		return (0);
3133 	}
3134 
3135 	if (dp->dxp != &xphase_fin) {
3136 		uint_t	token;
3137 
3138 		if (IDNDS_IS_CLOSED(dp)) {
3139 			PR_PROTO("%s:%d: domain already closed (%s)\n",
3140 				proc, domid, idnds_str[dp->dstate]);
3141 			if (msg & IDNP_MSGTYPE_MASK) {
3142 				/*
3143 				 * fin or fin+ack.
3144 				 */
3145 				mt.mt_mtype = IDNP_NACK;
3146 				mt.mt_atype = msg;
3147 				SET_XARGS_NACK_TYPE(aargs, IDNNACK_NOCONN);
3148 				idn_send_acknack(domid, &mt, aargs);
3149 			}
3150 			return (0);
3151 		}
3152 		dp->dfin_sync = IDNDS_SYNC_TYPE(dp);
3153 
3154 		/*
3155 		 * Need to do some clean-up a la idn_disconnect().
3156 		 *
3157 		 * Terminate any outstanding commands that were
3158 		 * targeted towards this domain.
3159 		 */
3160 		idn_terminate_cmd(domid, ECANCELED);
3161 
3162 		/*
3163 		 * Terminate any and all retries that may be
3164 		 * outstanding for this domain.
3165 		 */
3166 		token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
3167 		(void) idn_retry_terminate(token);
3168 
3169 		/*
3170 		 * Stop all outstanding message timers for
3171 		 * this guy.
3172 		 */
3173 		IDN_MSGTIMER_STOP(domid, 0, 0);
3174 
3175 		dp->dxp = &xphase_fin;
3176 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
3177 	}
3178 
3179 	if (msg & IDNP_NACK) {
3180 		idn_nack_t	nack;
3181 
3182 		nack = GET_XARGS_NACK_TYPE(xargs);
3183 		if (nack == IDNNACK_NOCONN) {
3184 			/*
3185 			 * We're trying to FIN with somebody we're
3186 			 * already disconnected from.  Need to
3187 			 * speed this guy through.
3188 			 */
3189 			DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3190 			(void) idn_sync_register(domid, IDNSYNC_DISCONNECT,
3191 					DOMAINSET_ALL, IDNSYNC_REG_REG);
3192 			ready_set = (uint_t)DOMAINSET_ALL;
3193 			/*
3194 			 * Need to transform message to allow us to
3195 			 * pass this guy right through and not waste time
3196 			 * talking to him.
3197 			 */
3198 			IDN_FSTATE_TRANSITION(dp, IDNFIN_FORCE_HARD);
3199 
3200 			switch (dp->dstate) {
3201 			case IDNDS_FIN_PEND:
3202 				mtp->mt_mtype = 0;
3203 				mtp->mt_atype = 0;
3204 				break;
3205 
3206 			case IDNDS_FIN_SENT:
3207 				mtp->mt_mtype = IDNP_FIN | IDNP_ACK;
3208 				mtp->mt_atype = 0;
3209 				break;
3210 
3211 			case IDNDS_FIN_RCVD:
3212 				mtp->mt_mtype = IDNP_ACK;
3213 				mtp->mt_atype = IDNP_FIN | IDNP_ACK;
3214 				break;
3215 
3216 			default:
3217 #ifdef DEBUG
3218 				cmn_err(CE_PANIC,
3219 					"%s:%d: UNEXPECTED state = %s",
3220 					proc, domid,
3221 					idnds_str[dp->dstate]);
3222 #endif /* DEBUG */
3223 				break;
3224 			}
3225 		}
3226 		fintype = (uint_t)dp->dfin;
3227 		finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3228 				IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3229 
3230 		CLR_XARGS(xargs);
3231 		SET_XARGS_FIN_TYPE(xargs, fintype);
3232 		SET_XARGS_FIN_ARG(xargs, finarg);
3233 		SET_XARGS_FIN_DOMSET(xargs, ready_set);
3234 		SET_XARGS_FIN_OPT(xargs, finopt);
3235 		SET_XARGS_FIN_MASTER(xargs, NIL_FIN_MASTER);
3236 	}
3237 
3238 	idn_xphase_transition(domid, mtp, xargs);
3239 
3240 	return (0);
3241 }
3242 
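/*
 * Retry handler for the FIN sequence.  Bail out if the domain has
 * left the FIN exchange phase or is no longer pending; otherwise
 * rebuild the FIN arguments (fin type, relink/unlink option and the
 * suggested new master) and re-drive the FIN state machine.
 */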
3243 /*ARGSUSED1*/
3244 static void
3245 idn_retry_fin(uint_t token, void *arg)
3246 {
3247 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
3248 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3249 	uint_t		finmaster;
3250 	idn_domain_t	*dp = &idn_domain[domid];
3251 	idn_xdcargs_t	xargs;
3252 	idn_finopt_t	finopt;
3253 	procname_t	proc = "idn_retry_fin";
3254 
3255 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_FIN);
3256 
3257 	IDN_SYNC_LOCK();
3258 	IDN_DLOCK_EXCL(domid);
3259 
3260 	if (dp->dxp != &xphase_fin) {
3261 		PR_PROTO("%s:%d: dxp(0x%p) != xstate_fin(0x%p)...bailing\n",
3262 			proc, domid, dp->dxp, &xphase_fin);
3263 		IDN_DUNLOCK(domid);
3264 		IDN_SYNC_UNLOCK();
3265 		return;
3266 	}
3267 
3268 	if (dp->dxstate != IDNXS_PEND) {
3269 		PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
3270 			proc, domid, idnxs_str[dp->dxstate],
3271 			idnxs_str[IDNXS_PEND]);
3272 		IDN_DUNLOCK(domid);
3273 		IDN_SYNC_UNLOCK();
3274 		return;
3275 	}
3276 
3277 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3278 			IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3279 
3280 	CLR_XARGS(xargs);
3281 	SET_XARGS_FIN_TYPE(xargs, dp->dfin);
3282 	/*LINTED*/
3283 	SET_XARGS_FIN_ARG(xargs, IDNFIN_ARG_NONE);
3284 	SET_XARGS_FIN_OPT(xargs, finopt);
3285 	SET_XARGS_FIN_DOMSET(xargs, 0);		/* unused when msg == 0 */
3286 	IDN_GLOCK_SHARED();
3287 	new_masterid = IDN_GET_NEW_MASTERID();
3288 	IDN_GUNLOCK();
3289 	if (new_masterid != IDN_NIL_DOMID)
3290 		new_cpuid = idn_domain[new_masterid].dcpu;
3291 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3292 	SET_XARGS_FIN_MASTER(xargs, finmaster);
3293 
3294 	idn_xphase_transition(domid, NULL, xargs);
3295 
3296 	IDN_DUNLOCK(domid);
3297 	IDN_SYNC_UNLOCK();
3298 }
3299 
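/*
 * Check routine for the FIN/PEND state.  Move the domain into
 * trans_off, shut down the datapath (all channels if the master or
 * our last connection is going away), remap the SMR back to local
 * memory and flush e-caches where required, handle fatal FIN
 * arguments and master re-selection (possibly forcing a RECONFIG
 * of the remaining domains), then register ready-sets with the
 * DISCONNECT synchronization and send FIN queries to any domains
 * we still need to hear from.
 */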
3300 static int
3301 idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3302 {
3303 	idn_domain_t	*dp = &idn_domain[domid];
3304 	idn_fin_t	fintype;
3305 	idn_finopt_t	finopt;
3306 	idn_finarg_t	finarg;
3307 	int		ready;
3308 	int		finmasterid;
3309 	int		fincpuid;
3310 	uint_t		finmaster;
3311 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3312 	domainset_t	query_set, ready_set, conn_set;
3313 	domainset_t	my_ready_set, shutdown_set;
3314 	procname_t	proc = "idn_check_fin_pend";
3315 
3316 	ASSERT(IDN_SYNC_IS_LOCKED());
3317 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3318 
3319 	if (msg & IDNP_NACK)
3320 		return (0);
3321 
3322 	if ((dp->dstate == IDNDS_FIN_PEND) &&
3323 			(msg & IDNP_MSGTYPE_MASK) &&
3324 			(msg & IDNP_ACK))		/* fin+ack */
3325 		return (1);
3326 
3327 	query_set = 0;
3328 
3329 	if (!DOMAIN_IN_SET(idn.domset.ds_trans_off, domid)) {
3330 		/*
3331 		 * Can't remove domain from ds_connected yet,
3332 		 * since he's still officially connected until
3333 		 * we get an ACK from him.
3334 		 */
3335 		DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
3336 		DOMAINSET_ADD(idn.domset.ds_trans_off, domid);
3337 	}
3338 
3339 	IDN_GLOCK_SHARED();
3340 	conn_set = (idn.domset.ds_connected | idn.domset.ds_trans_on) &
3341 			~idn.domset.ds_trans_off;
3342 	if ((idn.state == IDNGS_DISCONNECT) ||
3343 		(idn.state == IDNGS_RECONFIG) ||
3344 		(domid == IDN_GET_MASTERID()) || !conn_set) {
3345 		/*
3346 		 * If we're disconnecting, reconfiguring,
3347 		 * unlinking from the master, or unlinking
3348 		 * the last of our connections, then we need
3349 		 * to shut down all the channels.
3350 		 */
3351 		shutdown_set = DOMAINSET_ALL;
3352 	} else {
3353 		shutdown_set = DOMAINSET(domid);
3354 	}
3355 	IDN_GUNLOCK();
3356 
3357 	idn_shutdown_datapath(shutdown_set,
3358 				(dp->dfin == IDNFIN_FORCE_HARD));
3359 
3360 	IDN_GLOCK_EXCL();
3361 	/*
3362 	 * Remap the SMR back to our local space if the remote
3363 	 * domain going down is the master.  We do this now before
3364 	 * flushing caches.  This will help guarantee that any
3365 	 * accidental accesses to the SMR after the cache flush
3366 	 * will only go to local memory.
3367 	 */
3368 	if ((domid == IDN_GET_MASTERID()) && (idn.smr.rempfn != PFN_INVALID)) {
3369 		PR_PROTO("%s:%d: deconfiging CURRENT MASTER - SMR remap\n",
3370 			proc, domid);
3371 		IDN_DLOCK_EXCL(idn.localid);
3372 		/*
3373 		 * We're going to remap the SMR,
3374 		 * so gotta blow away our local
3375 		 * pointer to the mbox table.
3376 		 */
3377 		idn_domain[idn.localid].dmbox.m_tbl = NULL;
3378 		IDN_DUNLOCK(idn.localid);
3379 
3380 		idn.smr.rempfn = PFN_INVALID;
3381 		idn.smr.rempfnlim = PFN_INVALID;
3382 
3383 		smr_remap(&kas, idn.smr.vaddr, idn.smr.locpfn, IDN_SMR_SIZE);
3384 	}
3385 	IDN_GUNLOCK();
3386 
3387 	if (DOMAIN_IN_SET(idn.domset.ds_flush, domid)) {
3388 		idnxf_flushall_ecache();
3389 		CHECKPOINT_CLOSED(IDNSB_CHKPT_CACHE, dp->dhw.dh_boardset, 2);
3390 		DOMAINSET_DEL(idn.domset.ds_flush, domid);
3391 	}
3392 
3393 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3394 	finarg    = GET_XARGS_FIN_ARG(xargs);
3395 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3396 	finopt    = GET_XARGS_FIN_OPT(xargs);
3397 
3398 	ASSERT(fintype != IDNFIN_QUERY);
3399 	if (!VALID_FIN(fintype)) {
3400 		/*
3401 		 * If for some reason the remote domain
3402 		 * sent us an invalid FIN type,
3403 		 * override it to a NORMAL fin.
3404 		 */
3405 		PR_PROTO("%s:%d: WARNING invalid fintype (%d) -> %s(%d)\n",
3406 			proc, domid, (int)fintype,
3407 			idnfin_str[IDNFIN_NORMAL], (int)IDNFIN_NORMAL);
3408 		fintype = IDNFIN_NORMAL;
3409 	}
3410 
3411 	if (!VALID_FINOPT(finopt)) {
3412 		PR_PROTO("%s:%d: WARNING invalid finopt (%d) -> %s(%d)\n",
3413 			proc, domid, (int)finopt,
3414 			idnfinopt_str[IDNFIN_OPT_UNLINK],
3415 			(int)IDNFIN_OPT_UNLINK);
3416 		finopt = IDNFIN_OPT_UNLINK;
3417 	}
3418 
3419 	finmaster = GET_XARGS_FIN_MASTER(xargs);
3420 	finmasterid = FIN_MASTER_DOMID(finmaster);
3421 	fincpuid = FIN_MASTER_CPUID(finmaster);
3422 
3423 	if ((finarg != IDNFIN_ARG_NONE) &&
3424 			!DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
3425 		idnsb_error_t	idnerr;
3426 
3427 		INIT_IDNKERR(&idnerr);
3428 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
3429 		SET_IDNKERR_IDNERR(&idnerr, FINARG2IDNKERR(finarg));
3430 		SET_IDNKERR_PARAM0(&idnerr, domid);
3431 
3432 		if (IDNFIN_ARG_IS_FATAL(finarg)) {
3433 			finopt = IDNFIN_OPT_UNLINK;
3434 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
3435 			DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
3436 
3437 			if (idn.domset.ds_connected == 0) {
3438 				domainset_t	domset;
3439 
3440 				IDN_GLOCK_EXCL();
3441 				domset = ~idn.domset.ds_relink;
3442 				if (idn.domset.ds_relink == 0) {
3443 					IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
3444 				}
3445 				domset &= ~idn.domset.ds_hitlist;
3446 				/*
3447 				 * The primary domain we were trying to
3448 				 * connect to fin'd us with a fatal argument.
3449 				 * Something isn't cool in our IDN environment,
3450 				 * e.g. corrupted SMR or non-compatible CONFIG
3451 				 * parameters.  In any case we need to dismantle
3452 				 * ourselves completely.
3453 				 */
3454 				IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3455 				IDN_GUNLOCK();
3456 				IDN_DUNLOCK(domid);
3457 
3458 				DOMAINSET_DEL(domset, idn.localid);
3459 				DOMAINSET_DEL(domset, domid);
3460 
3461 				idn_update_op(IDNOP_ERROR, DOMAINSET_ALL,
3462 						&idnerr);
3463 
3464 				PR_HITLIST("%s:%d: unlink_domainset(%x) "
3465 					"due to CFG error (relink=%x, "
3466 					"hitlist=%x)\n", proc, domid, domset,
3467 					idn.domset.ds_relink,
3468 					idn.domset.ds_hitlist);
3469 
3470 				idn_unlink_domainset(domset, IDNFIN_NORMAL,
3471 						finarg, IDNFIN_OPT_UNLINK,
3472 						BOARDSET_ALL);
3473 				IDN_DLOCK_EXCL(domid);
3474 			}
3475 			PR_HITLIST("%s:%d: CFG error, (conn=%x, relink=%x, "
3476 				"hitlist=%x)\n",
3477 				proc, domid, idn.domset.ds_connected,
3478 				idn.domset.ds_relink, idn.domset.ds_hitlist);
3479 		}
3480 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
3481 	}
3482 
3483 	if ((finmasterid != IDN_NIL_DOMID) &&
3484 			(!VALID_DOMAINID(finmasterid) ||
3485 			DOMAIN_IN_SET(idn.domset.ds_hitlist, domid))) {
3486 		PR_HITLIST("%s:%d: finmasterid = %d -> -1, relink=%x, "
3487 			"hitlist=%x\n",
3488 			proc, domid, finmasterid, idn.domset.ds_relink,
3489 			idn.domset.ds_hitlist);
3490 		PR_PROTO("%s:%d: WARNING invalid finmasterid (%d) -> -1\n",
3491 			proc, domid, finmasterid);
3492 		finmasterid = IDN_NIL_DOMID;
3493 	}
3494 
3495 	IDN_GLOCK_EXCL();
3496 
3497 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
3498 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
3499 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
3500 					idn.domset.ds_relink);
3501 	} else {
3502 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
3503 		DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
3504 	}
3505 
3506 	if ((domid == IDN_GET_NEW_MASTERID()) &&
3507 			!DOMAIN_IN_SET(idn.domset.ds_relink, domid)) {
3508 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3509 	}
3510 
3511 	if ((idn.state != IDNGS_DISCONNECT) &&
3512 			(idn.state != IDNGS_RECONFIG) &&
3513 			(domid == IDN_GET_MASTERID())) {
3514 		domainset_t	dis_set, master_candidates;
3515 
3516 		IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
3517 
3518 		IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
3519 		IDN_GUNLOCK();
3520 
3521 		if ((finmasterid != IDN_NIL_DOMID) &&
3522 				(finmasterid != idn.localid)) {
3523 			if (finmasterid != domid)
3524 				IDN_DLOCK_EXCL(finmasterid);
3525 			if (idn_open_domain(finmasterid, fincpuid, 0) < 0) {
3526 				cmn_err(CE_WARN,
3527 					"IDN: 205: (%s) failed to "
3528 					"open-domain(%d,%d)",
3529 					proc, finmasterid, fincpuid);
3530 				if (finmasterid != domid)
3531 					IDN_DUNLOCK(finmasterid);
3532 				finmasterid = IDN_NIL_DOMID;
3533 			}
3534 			if (finmasterid != domid)
3535 				IDN_DUNLOCK(finmasterid);
3536 		}
3537 
3538 		IDN_GLOCK_EXCL();
3539 		if (finmasterid == IDN_NIL_DOMID) {
3540 			int	m;
3541 
3542 			master_candidates = idn.domset.ds_trans_on |
3543 						idn.domset.ds_connected |
3544 						idn.domset.ds_relink;
3545 			master_candidates &= ~(idn.domset.ds_trans_off &
3546 						~idn.domset.ds_relink);
3547 			DOMAINSET_DEL(master_candidates, domid);
3548 			/*
3549 			 * Local domain gets to participate also.
3550 			 */
3551 			DOMAINSET_ADD(master_candidates, idn.localid);
3552 
3553 			m = idn_select_candidate(master_candidates);
3554 			IDN_SET_NEW_MASTERID(m);
3555 		} else {
3556 			IDN_SET_NEW_MASTERID(finmasterid);
3557 		}
3558 		IDN_GUNLOCK();
3559 
3560 		dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
3561 		DOMAINSET_DEL(dis_set, domid);
3562 
3563 		idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
3564 					IDNFIN_OPT_RELINK, BOARDSET_ALL);
3565 	} else {
3566 		IDN_GUNLOCK();
3567 	}
3568 
3569 	/*
3570 	 * My local ready-set consists of those domains from which I
3571 	 * have confirmed no datapaths exist.
3572 	 */
3573 	my_ready_set = ~idn.domset.ds_connected;
3574 
3575 	switch (dp->dfin) {
3576 	case IDNFIN_NORMAL:
3577 	case IDNFIN_FORCE_SOFT:
3578 	case IDNFIN_FORCE_HARD:
3579 		if (fintype < dp->dfin) {
3580 			/*
3581 			 * Remote domain has requested a
3582 			 * FIN of lower priority than what
3583 			 * we're currently running.  Just
3584 			 * leave the priority where it is.
3585 			 */
3586 			break;
3587 		}
3588 		/*FALLTHROUGH*/
3589 
3590 	default:
3591 		IDN_FSTATE_TRANSITION(dp, fintype);
3592 		break;
3593 	}
3594 
3595 	ASSERT(dp->dfin_sync != IDNFIN_SYNC_OFF);
3596 
3597 	if (msg == 0) {
3598 		/*
3599 		 * Local domain is initiating a FIN sequence
3600 		 * to remote domid.  Note that remote domain
3601 		 * remains in ds_connected even though he's
3602 		 * in the ready-set from the local domain's
3603 		 * perspective.  We can't remove him from
3604 		 * ds_connected until we get a confirmed message
3605 		 * from him indicating he has ceased communication.
3606 		 */
3607 		ready_set = my_ready_set;
3608 	} else {
3609 		/*
3610 		 * Remote domain initiated a FIN sequence
3611 		 * to local domain.  This implies that he
3612 		 * has shut down his datapath to us.  Since
3613 		 * we shut down our datapath to him, we're
3614 		 * effectively now in his ready-set.
3615 		 */
3616 		DOMAINSET_ADD(ready_set, idn.localid);
3617 		/*
3618 		 * Since we know both sides of the connection
3619 		 * have ceased, this remote domain is effectively
3620 		 * considered disconnected.
3621 		 */
3622 		DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3623 	}
3624 
3625 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3626 		/*
3627 		 * If we're doing a hard disconnect
3628 		 * of this domain then we want to
3629 		 * blow straight through and not
3630 		 * waste time trying to talk to the
3631 		 * remote domain or to domains we
3632 		 * believe are AWOL, although we will
3633 		 * try to do it cleanly with
3634 		 * everybody else.
3635 		 */
3636 		DOMAINSET_ADD(my_ready_set, domid);
3637 		my_ready_set |= idn.domset.ds_awol;
3638 		ready_set = DOMAINSET_ALL;
3639 
3640 	} else if (dp->dfin_sync == IDNFIN_SYNC_NO) {
3641 		/*
3642 		 * If we're not fin'ing this domain
3643 		 * synchronously then the only
3644 		 * expected domain set is himself.
3645 		 */
3646 		ready_set |= ~DOMAINSET(domid);
3647 		my_ready_set |= ~DOMAINSET(domid);
3648 	}
3649 
3650 	if (dp->dsync.s_cmd != IDNSYNC_DISCONNECT) {
3651 		idn_sync_exit(domid, IDNSYNC_CONNECT);
3652 		idn_sync_enter(domid, IDNSYNC_DISCONNECT,
3653 					DOMAINSET_ALL, my_ready_set,
3654 					idn_xstate_transfunc,
3655 					(void *)IDNP_FIN);
3656 	}
3657 
3658 	query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
3659 					ready_set, IDNSYNC_REG_REG);
3660 	/*
3661 	 * No need to query this domain as he's already
3662 	 * in the FIN sequence.
3663 	 */
3664 	DOMAINSET_DEL(query_set, domid);
3665 
3666 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
3667 	if (ready) {
3668 		DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
3669 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
3670 	}
3671 
3672 	if (query_set) {
3673 		int	d;
3674 
3675 		my_ready_set = idn.domset.ds_ready_off |
3676 				~idn.domset.ds_connected;
3677 
3678 		for (d = 0; d < MAX_DOMAINS; d++) {
3679 			if (!DOMAIN_IN_SET(query_set, d))
3680 				continue;
3681 
3682 			dp = &idn_domain[d];
3683 
3684 			IDN_DLOCK_EXCL(d);
3685 
3686 			if (dp->dsync.s_cmd == IDNSYNC_DISCONNECT) {
3687 				IDN_DUNLOCK(d);
3688 				continue;
3689 			}
3690 
3691 			IDN_SYNC_QUERY_UPDATE(domid, d);
3692 
3693 			idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
3694 					IDNFIN_OPT_NONE, my_ready_set,
3695 					NIL_FIN_MASTER);
3696 			IDN_DUNLOCK(d);
3697 		}
3698 	}
3699 
3700 	return (!msg ? 0 : (ready ? 0 : 1));
3701 }
3702 
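/*
 * Error handler for the FIN/PEND state.  Nack the offending message
 * with a retry hint, unless we're forcing a hard disconnect of the
 * domain, and schedule a FIN retry.
 */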
3703 /*ARGSUSED*/
3704 static void
3705 idn_error_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3706 {
3707 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
3708 	uint_t	token;
3709 
3710 	ASSERT(IDN_SYNC_IS_LOCKED());
3711 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3712 
3713 	/*
3714 	 * Don't communicate with domains on which
3715 	 * we're forcing a hard disconnect.
3716 	 */
3717 	if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
3718 			(msg & IDNP_MSGTYPE_MASK)) {
3719 		idn_msgtype_t	mt;
3720 		idn_xdcargs_t	nargs;
3721 
3722 		mt.mt_mtype = IDNP_NACK;
3723 		mt.mt_atype = msg;
3724 		mt.mt_cookie = mtp->mt_cookie;
3725 		CLR_XARGS(nargs);
3726 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
3727 		idn_send_acknack(domid, &mt, nargs);
3728 	}
3729 
3730 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3731 	idn_retry_submit(idn_retry_fin, NULL, token,
3732 			idn_msg_retrytime[(int)IDNRETRY_FIN]);
3733 }
3734 
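/*
 * Transition action for the FIN/PEND state.  For a hard disconnect,
 * just push the state machine forward locally; otherwise send a FIN
 * (or FIN+ACK when responding to an incoming FIN) carrying our
 * ready-set and the suggested new master, or schedule a retry if we
 * were nack'd.
 */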
3735 static void
3736 idn_action_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3737 {
3738 	idn_domain_t	*dp = &idn_domain[domid];
3739 	domainset_t	my_ready_set;
3740 	idn_finopt_t	finopt;
3741 	idn_finarg_t	finarg;
3742 	uint_t		finmaster;
3743 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3744 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3745 	idn_msgtype_t	mt;
3746 
3747 	ASSERT(IDN_SYNC_IS_LOCKED());
3748 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3749 
3750 	my_ready_set = dp->dsync.s_set_rdy |
3751 			idn.domset.ds_ready_off |
3752 			~idn.domset.ds_connected;
3753 
3754 	ASSERT(xargs[0] != (uint_t)IDNFIN_QUERY);
3755 
3756 	finarg = GET_XARGS_FIN_ARG(xargs);
3757 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3758 			IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3759 
3760 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3761 
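	/*
	 * Pick up the prospective new master, if one has been
	 * nominated, and pack its domid and CPU into the finmaster
	 * word that gets carried in the FIN message.
	 */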
3762 	IDN_GLOCK_SHARED();
3763 	new_masterid = IDN_GET_NEW_MASTERID();
3764 	IDN_GUNLOCK();
3765 	if (new_masterid != IDN_NIL_DOMID)
3766 		new_cpuid = idn_domain[new_masterid].dcpu;
3767 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3768 
3769 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3770 		ASSERT(IDN_DLOCK_IS_EXCL(domid));
3771 
3772 		if (!msg) {
3773 			mt.mt_mtype = IDNP_FIN | IDNP_ACK;
3774 			mt.mt_atype = 0;
3775 		} else {
3776 			mt.mt_mtype = IDNP_ACK;
3777 			mt.mt_atype = IDNP_FIN | IDNP_ACK;
3778 		}
3779 		idn_xphase_transition(domid, &mt, xargs);
3780 	} else if (!msg) {
3781 		idn_send_fin(domid, NULL, dp->dfin, finarg,
3782 				finopt, my_ready_set, finmaster);
3783 	} else if ((msg & IDNP_ACKNACK_MASK) == 0) {
3784 		/*
3785 		 * fin
3786 		 */
3787 		mt.mt_mtype = IDNP_FIN | IDNP_ACK;
3788 		mt.mt_atype = 0;
3789 		idn_send_fin(domid, &mt, dp->dfin, finarg,
3790 				finopt, my_ready_set, finmaster);
3791 	} else {
3792 		uint_t	token;
3793 		/*
3794 		 * nack - retry
3795 		 */
3796 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3797 		idn_retry_submit(idn_retry_fin, NULL, token,
3798 				idn_msg_retrytime[(int)IDNRETRY_FIN]);
3799 	}
3800 }
3801 
3802 static int
3803 idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3804 {
3805 	int		ready;
3806 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3807 	idn_fin_t	fintype;
3808 	idn_finopt_t	finopt;
3809 	idn_domain_t	*dp = &idn_domain[domid];
3810 	domainset_t	query_set, ready_set;
3811 
3812 	ASSERT(IDN_SYNC_IS_LOCKED());
3813 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3814 
3815 	if (msg & IDNP_NACK)
3816 		return (0);
3817 
3818 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3819 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3820 	finopt    = GET_XARGS_FIN_OPT(xargs);
3821 
3822 	ASSERT(fintype != IDNFIN_QUERY);
3823 	if (!VALID_FIN(fintype)) {
3824 		/*
3825 		 * If for some reason the remote domain
3826 		 * sent us an invalid FIN type,
3827 		 * override it to a NORMAL fin.
3828 		 */
3829 		fintype = IDNFIN_NORMAL;
3830 	}
3831 
3832 	if (!VALID_FINOPT(finopt)) {
3833 		finopt = IDNFIN_OPT_UNLINK;
3834 	}
3835 	IDN_GLOCK_SHARED();
3836 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
3837 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
3838 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
3839 					idn.domset.ds_relink);
3840 	} else {
3841 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
3842 	}
3843 	IDN_GUNLOCK();
3844 
3845 	switch (dp->dfin) {
3846 	case IDNFIN_NORMAL:
3847 	case IDNFIN_FORCE_SOFT:
3848 	case IDNFIN_FORCE_HARD:
3849 		if (fintype < dp->dfin) {
3850 			/*
3851 			 * Remote domain has requested a
3852 			 * FIN of lower priority than what
3853 			 * we're currently running.  Just
3854 			 * leave the priority where it is.
3855 			 */
3856 			break;
3857 		}
3858 		/*FALLTHROUGH*/
3859 
3860 	default:
3861 		IDN_FSTATE_TRANSITION(dp, fintype);
3862 		break;
3863 	}
3864 
3865 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3866 		/*
3867 		 * If we're doing a hard disconnect
3868 		 * of this domain then we want to
3869 		 * blow straight through and not
3870 		 * waste time trying to talk to the
3871 		 * remote domain.  By registering him
3872 		 * as ready with respect to all
3873 		 * possible domains he'll transition
3874 		 * immediately.  Note that we'll still
3875 		 * try and do it coherently with
3876 		 * other domains to which we're connected.
3877 		 */
3878 		ready_set = DOMAINSET_ALL;
3879 	} else {
3880 		DOMAINSET_ADD(ready_set, idn.localid);
3881 	}
3882 
3883 	DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3884 
3885 	query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
3886 					ready_set, IDNSYNC_REG_REG);
3887 	/*
3888 	 * No need to query this domain as he's already
3889 	 * in the FIN sequence.
3890 	 */
3891 	DOMAINSET_DEL(query_set, domid);
3892 
3893 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
3894 	if (ready) {
3895 		DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
3896 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
3897 	}
3898 
3899 	if (query_set) {
3900 		int		d;
3901 		domainset_t	my_ready_set;
3902 
3903 		my_ready_set = idn.domset.ds_ready_off |
3904 				~idn.domset.ds_connected;
3905 
3906 		for (d = 0; d < MAX_DOMAINS; d++) {
3907 			if (!DOMAIN_IN_SET(query_set, d))
3908 				continue;
3909 
3910 			dp = &idn_domain[d];
3911 
3912 			IDN_DLOCK_EXCL(d);
3913 
3914 			if (dp->dsync.s_cmd == IDNSYNC_DISCONNECT) {
3915 				IDN_DUNLOCK(d);
3916 				continue;
3917 			}
3918 
3919 			IDN_SYNC_QUERY_UPDATE(domid, d);
3920 
3921 			idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
3922 					IDNFIN_OPT_NONE, my_ready_set,
3923 					NIL_FIN_MASTER);
3924 			IDN_DUNLOCK(d);
3925 		}
3926 	}
3927 
3928 	return ((ready > 0) ? 0 : 1);
3929 }
3930 
3931 /*ARGSUSED*/
3932 static void
3933 idn_error_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3934 {
3935 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
3936 	uint_t	token;
3937 
3938 	ASSERT(IDN_SYNC_IS_LOCKED());
3939 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3940 
3941 	/*
3942 	 * Don't communicate with domains on which
3943 	 * we're forcing a hard disconnect.
3944 	 */
3945 	if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
3946 			(msg & IDNP_MSGTYPE_MASK)) {
3947 		idn_msgtype_t	mt;
3948 		idn_xdcargs_t	nargs;
3949 
3950 		mt.mt_mtype = IDNP_NACK;
3951 		mt.mt_atype = msg;
3952 		mt.mt_cookie = mtp->mt_cookie;
3953 		CLR_XARGS(nargs);
3954 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
3955 		idn_send_acknack(domid, &mt, nargs);
3956 	}
3957 
3958 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3959 	idn_retry_submit(idn_retry_fin, NULL, token,
3960 			idn_msg_retrytime[(int)IDNRETRY_FIN]);
3961 }
3962 
3963 static void
3964 idn_action_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3965 {
3966 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3967 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3968 	uint_t		finmaster;
3969 	idn_msgtype_t	mt;
3970 	idn_finopt_t	finopt;
3971 	idn_finarg_t	finarg;
3972 	domainset_t	my_ready_set;
3973 	idn_domain_t	*dp = &idn_domain[domid];
3974 
3975 	ASSERT(IDN_SYNC_IS_LOCKED());
3976 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3977 
3978 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3979 
3980 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3981 				IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3982 
3983 	finarg = GET_XARGS_FIN_ARG(xargs);
3984 
3985 	my_ready_set = dp->dsync.s_set_rdy |
3986 				idn.domset.ds_ready_off |
3987 				~idn.domset.ds_connected;
3988 
3989 	IDN_GLOCK_SHARED();
3990 	new_masterid = IDN_GET_NEW_MASTERID();
3991 	IDN_GUNLOCK();
3992 	if (new_masterid != IDN_NIL_DOMID)
3993 		new_cpuid = idn_domain[new_masterid].dcpu;
3994 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3995 
3996 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
3997 		/*
3998 		 * fin
3999 		 */
4000 		if (dp->dfin == IDNFIN_FORCE_HARD) {
4001 			mt.mt_mtype = IDNP_ACK;
4002 			mt.mt_atype = IDNP_FIN | IDNP_ACK;
4003 			idn_xphase_transition(domid, &mt, xargs);
4004 		} else {
4005 			mt.mt_mtype = IDNP_FIN | IDNP_ACK;
4006 			mt.mt_atype = 0;
4007 			idn_send_fin(domid, &mt, dp->dfin, finarg,
4008 					finopt, my_ready_set, finmaster);
4009 		}
4010 	} else if (msg & IDNP_MSGTYPE_MASK) {
4011 		/*
4012 		 * fin+ack
4013 		 */
4014 		if (dp->dfin != IDNFIN_FORCE_HARD) {
4015 			idn_xdcargs_t	fargs;
4016 
4017 			mt.mt_mtype = IDNP_ACK;
4018 			mt.mt_atype = msg;
4019 			CLR_XARGS(fargs);
4020 			SET_XARGS_FIN_TYPE(fargs, dp->dfin);
4021 			SET_XARGS_FIN_ARG(fargs, finarg);
4022 			SET_XARGS_FIN_DOMSET(fargs, my_ready_set);
4023 			SET_XARGS_FIN_OPT(fargs, finopt);
4024 			SET_XARGS_FIN_MASTER(fargs, finmaster);
4025 			idn_send_acknack(domid, &mt, fargs);
4026 		}
4027 	} else {
4028 		uint_t	token;
4029 		/*
4030 		 * nack - retry
4031 		 */
4032 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4033 		idn_retry_submit(idn_retry_fin, NULL, token,
4034 				idn_msg_retrytime[(int)IDNRETRY_FIN]);
4035 	}
4036 }
4037 
4038 /*ARGSUSED*/
4039 static void
4040 idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
4041 {
4042 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
4043 
4044 	ASSERT(IDN_SYNC_IS_LOCKED());
4045 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4046 
4047 	if (msg & IDNP_NACK) {
4048 		uint_t	token;
4049 		/*
4050 		 * nack - retry.
4051 		 */
4052 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4053 		idn_retry_submit(idn_retry_fin, NULL, token,
4054 				idn_msg_retrytime[(int)IDNRETRY_FIN]);
4055 	}
4056 }
4057 
4058 static void
4059 idn_final_fin(int domid)
4060 {
4061 	int		do_relink;
4062 	int		rv, d, new_masterid = IDN_NIL_DOMID;
4063 	idn_gstate_t	next_gstate;
4064 	domainset_t	relinkset;
4065 	uint_t		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4066 	idn_domain_t	*ldp, *dp = &idn_domain[domid];
4067 	procname_t	proc = "idn_final_fin";
4068 
4069 	ASSERT(IDN_SYNC_IS_LOCKED());
4070 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4071 	ASSERT(dp->dstate == IDNDS_DMAP);
4072 
4073 	(void) idn_retry_terminate(token);
4074 
4075 	dp->dxp = NULL;
4076 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
4077 
4078 	idn_sync_exit(domid, IDNSYNC_DISCONNECT);
4079 
4080 	DOMAINSET_DEL(idn.domset.ds_trans_off, domid);
4081 
4082 	do_relink = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ? 1 : 0;
4083 
4084 	/*
4085 	 * idn_deconfig will idn_close_domain.
4086 	 */
4087 	idn_deconfig(domid);
4088 
4089 	PR_PROTO("%s:%d: DISCONNECTED\n", proc, domid);
4090 
4091 	IDN_GLOCK_EXCL();
4092 	/*
4093 	 * It's important that this update-op occur within
4094 	 * the context of holding the glock(EXCL).  There is
4095 	 * still some additional state to clean up, which
4096 	 * will be completed once the glock is dropped in
4097 	 * this flow.  This means anybody that's doing an
4098 	 * SSI_INFO and waiting on glock will not actually
4099 	 * run until the clean-up is completed, which is what
4100 	 * we want.  Recall that a separate thread processes
4101 	 * the SSI_LINK/UNLINK calls and when they complete
4102 	 * (i.e. are awakened) they will immediately SSI_INFO
4103 	 * and we don't want them to prematurely pick up stale
4104 	 * information.
4105 	 */
4106 	idn_update_op(IDNOP_DISCONNECTED, DOMAINSET(domid), NULL);
4107 
4108 	ASSERT(idn.state != IDNGS_OFFLINE);
4109 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid));
4110 
4111 	if (domid == IDN_GET_MASTERID()) {
4112 		IDN_SET_MASTERID(IDN_NIL_DOMID);
4113 		dp->dvote.v.master = 0;
4114 	}
4115 
4116 	if ((domid == IDN_GET_NEW_MASTERID()) && !do_relink) {
4117 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
4118 	}
4119 
4120 	if (idn.state == IDNGS_RECONFIG)
4121 		new_masterid = IDN_GET_NEW_MASTERID();
4122 
4123 	if ((idn.domset.ds_trans_on |
4124 			idn.domset.ds_trans_off |
4125 			idn.domset.ds_relink) == 0) {
4126 		PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
4127 			proc, domid, idn.domset.ds_hitlist);
4128 		idn.domset.ds_hitlist = 0;
4129 	}
4130 
4131 	if (idn.domset.ds_connected || idn.domset.ds_trans_off) {
4132 		PR_PROTO("%s:%d: ds_connected = 0x%x, ds_trans_off = 0x%x\n",
4133 			proc, domid, idn.domset.ds_connected,
4134 			idn.domset.ds_trans_off);
4135 		IDN_GUNLOCK();
4136 		goto fin_done;
4137 	}
4138 
4139 	IDN_DLOCK_EXCL(idn.localid);
4140 	ldp = &idn_domain[idn.localid];
4141 
4142 	if (idn.domset.ds_trans_on != 0) {
4143 		ASSERT((idn.state != IDNGS_DISCONNECT) &&
4144 			(idn.state != IDNGS_OFFLINE));
4145 
4146 		switch (idn.state) {
4147 		case IDNGS_CONNECT:
4148 			if (idn.localid == IDN_GET_MASTERID()) {
4149 				idn_master_deinit();
4150 				IDN_SET_MASTERID(IDN_NIL_DOMID);
4151 				ldp->dvote.v.master = 0;
4152 			}
4153 			/*FALLTHROUGH*/
4154 		case IDNGS_ONLINE:
4155 			next_gstate = idn.state;
4156 			break;
4157 
4158 		case IDNGS_RECONFIG:
4159 			if (idn.localid == IDN_GET_MASTERID()) {
4160 				idn_master_deinit();
4161 				IDN_SET_MASTERID(IDN_NIL_DOMID);
4162 				ldp->dvote.v.master = 0;
4163 			}
4164 			ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
4165 			next_gstate = IDNGS_CONNECT;
4166 			ldp->dvote.v.connected = 0;
4167 			/*
4168 			 * Need to do HWINIT since we won't
4169 			 * be transitioning through OFFLINE
4170 			 * which would normally be caught in
4171 			 * idn_check_nego() when we
4172 			 * initially go to CONNECT.
4173 			 */
4174 			IDN_PREP_HWINIT();
4175 			break;
4176 
4177 		case IDNGS_DISCONNECT:
4178 		case IDNGS_OFFLINE:
4179 			cmn_err(CE_WARN,
4180 				"IDN: 211: disconnect domain %d, "
4181 				"unexpected Gstate (%s)",
4182 				domid, idngs_str[idn.state]);
4183 			IDN_DUNLOCK(idn.localid);
4184 			IDN_GUNLOCK();
4185 			goto fin_done;
4186 
4187 		default:
4188 			/*
4189 			 * XXX
4190 			 * Go into FATAL state?
4191 			 */
4192 			cmn_err(CE_PANIC,
4193 				"IDN: 212: disconnect domain %d, "
4194 				"bad Gstate (%d)",
4195 				domid, idn.state);
4196 			/* not reached */
4197 			break;
4198 		}
4199 	} else {
4200 		if (idn.localid == IDN_GET_MASTERID()) {
4201 			idn_master_deinit();
4202 			IDN_SET_MASTERID(IDN_NIL_DOMID);
4203 			ldp->dvote.v.master = 0;
4204 		}
4205 		next_gstate = IDNGS_OFFLINE;
4206 		if (idn.domset.ds_relink == 0) {
4207 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
4208 		}
4209 	}
4210 	IDN_DUNLOCK(idn.localid);
4211 
4212 	/*
4213 	 * If we reach here we've effectively disconnected all
4214 	 * existing links, however new ones may be pending.
4215 	 */
4216 	PR_PROTO("%s:%d: ALL DISCONNECTED *****************\n", proc, domid);
4217 
4218 	IDN_GSTATE_TRANSITION(next_gstate);
4219 
4220 	ASSERT((idn.state == IDNGS_OFFLINE) ?
4221 		(IDN_GET_MASTERID() == IDN_NIL_DOMID) : 1);
4222 
4223 	IDN_GUNLOCK();
4224 
4225 	/*
4226 	 * If we have no new masterid and yet there are relinkers
4227 	 * out there, then force us to attempt to link with one
4228 	 * of them.
4229 	 */
4230 	if ((new_masterid == IDN_NIL_DOMID) && idn.domset.ds_relink)
4231 		new_masterid = idn.localid;
4232 
4233 	if (new_masterid != IDN_NIL_DOMID) {
4234 		/*
4235 		 * If the local domain is the selected
4236 		 * master then we'll want to initiate
4237 		 * a link with one of the other candidates.
4238 		 * If not, then we want to initiate a link
4239 		 * with the master only.
4240 		 */
4241 		relinkset = (new_masterid == idn.localid) ?
4242 				idn.domset.ds_relink : DOMAINSET(new_masterid);
4243 
4244 		DOMAINSET_DEL(relinkset, idn.localid);
4245 
4246 		for (d = 0; d < MAX_DOMAINS; d++) {
4247 			int	lock_held;
4248 
4249 			if (!DOMAIN_IN_SET(relinkset, d))
4250 				continue;
4251 
4252 			if (d == domid) {
4253 				do_relink = 0;
4254 				lock_held = 0;
4255 			} else {
4256 				IDN_DLOCK_EXCL(d);
4257 				lock_held = 1;
4258 			}
4259 
4260 			rv = idn_open_domain(d, -1, 0);
4261 			if (rv == 0) {
4262 				rv = idn_connect(d);
4263 				if (lock_held)
4264 					IDN_DUNLOCK(d);
4265 				/*
4266 				 * If we're able to kick off at
4267 				 * least one connect then that's
4268 				 * good enough for now.  The others
4269 				 * will fall into place normally.
4270 				 */
4271 				if (rv == 0)
4272 					break;
4273 			} else if (rv < 0) {
4274 				if (lock_held)
4275 					IDN_DUNLOCK(d);
4276 				cmn_err(CE_WARN,
4277 					"IDN: 205: (%s.1) failed to "
4278 					"open-domain(%d,%d)",
4279 					proc, domid, -1);
4280 				DOMAINSET_DEL(idn.domset.ds_relink, d);
4281 			} else {
4282 				if (lock_held)
4283 					IDN_DUNLOCK(d);
4284 				PR_PROTO("%s:%d: failed to "
4285 					"re-open domain %d "
4286 					"(cpu %d) [rv = %d]\n",
4287 					proc, domid, d, idn_domain[d].dcpu,
4288 					rv);
4289 			}
4290 		}
4291 	}
4292 
4293 fin_done:
4294 	if (do_relink) {
4295 		ASSERT(IDN_DLOCK_IS_EXCL(domid));
4296 
4297 		rv = idn_open_domain(domid, -1, 0);
4298 		if (rv == 0) {
4299 			(void) idn_connect(domid);
4300 		} else if (rv < 0) {
4301 			cmn_err(CE_WARN,
4302 				"IDN: 205: (%s.2) failed to "
4303 				"open-domain(%d,%d)",
4304 				proc, domid, -1);
4305 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
4306 		}
4307 	}
4308 }
4309 
4310 static void
4311 idn_exit_fin(int domid, uint_t msgtype)
4312 {
4313 	idn_domain_t	*dp = &idn_domain[domid];
4314 	uint_t		token;
4315 	procname_t	proc = "idn_exit_fin";
4316 	STRING(str);
4317 
4318 	ASSERT(IDN_SYNC_IS_LOCKED());
4319 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4320 
4321 	INUM2STR(msgtype, str);
4322 	PR_PROTO("%s:%d: msgtype = 0x%x(%s)\n", proc, domid, msgtype, str);
4323 
4324 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4325 	(void) idn_retry_terminate(token);
4326 
4327 	DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
4328 
4329 	dp->dxp = &xphase_fin;
4330 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
4331 
4332 	idn_retry_submit(idn_retry_fin, NULL, token,
4333 			idn_msg_retrytime[(int)IDNRETRY_FIN]);
4334 }
4335 
4336 /*
4337  * Must return w/locks held.
4338  */
4339 static int
4340 idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
4341 {
4342 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
4343 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
4344 	idn_xphase_t	*xp;
4345 	idn_domain_t	*dp;
4346 	int		(*cfunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4347 	void		(*ffunc)(int);
4348 	void		(*afunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4349 	void		(*efunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4350 	void		(*xfunc)(int, uint_t);
4351 	int		err = 0;
4352 	uint_t		msgtype;
4353 	idn_xstate_t	o_xstate, n_xstate;
4354 	procname_t	proc = "idn_xphase_transition";
4355 	STRING(mstr);
4356 	STRING(astr);
4357 
4358 	ASSERT(IDN_SYNC_IS_LOCKED());
4359 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4360 
4361 	INUM2STR(msg, mstr);
4362 	INUM2STR(msgarg, astr);
4363 
4364 	dp = &idn_domain[domid];
4365 	if ((xp = dp->dxp) == NULL) {
4366 		PR_PROTO("%s:%d: WARNING: domain xsp is NULL (msg = %s, "
4367 			"msgarg = %s) <<<<<<<<<<<<\n",
4368 			proc, domid, mstr, astr);
4369 		return (-1);
4370 	}
4371 	o_xstate = dp->dxstate;
4372 
4373 	xfunc = xp->xt_exit;
4374 
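	/*
	 * For a pure ack/nack the underlying message type travels in
	 * the ack type (msgarg), so fall back to it when the mtype
	 * bits are empty.
	 */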
4375 	if ((msgtype = (msg & IDNP_MSGTYPE_MASK)) == 0)
4376 		msgtype = msgarg & IDNP_MSGTYPE_MASK;
4377 
4378 	if ((o_xstate == IDNXS_PEND) && msg &&
4379 			((msg & IDNP_ACKNACK_MASK) == msg)) {
4380 		PR_PROTO("%s:%d: unwanted acknack received (o_xstate = %s, "
4381 			"msg = %s/%s - dropping message\n",
4382 			proc, domid, idnxs_str[(int)o_xstate], mstr, astr);
4383 		return (0);
4384 	}
4385 
4386 	/*
4387 	 * Validate that message received is following
4388 	 * the expected protocol for the current state.
4389 	 */
4390 	if (idn_next_xstate(o_xstate, -1, msg) == IDNXS_NIL) {
4391 		PR_PROTO("%s:%d: WARNING: o_xstate = %s, msg = %s -> NIL "
4392 			"<<<<<<<<<\n",
4393 			proc, domid, idnxs_str[(int)o_xstate], mstr);
4394 		if (xfunc)
4395 			(*xfunc)(domid, msgtype);
4396 		return (-1);
4397 	}
4398 
4399 	if (msg || msgarg) {
4400 		/*
4401 		 * Verify that message type is correct for
4402 		 * the given xstate.
4403 		 */
4404 		if (msgtype != xp->xt_msgtype) {
4405 			STRING(xstr);
4406 			STRING(tstr);
4407 
4408 			INUM2STR(xp->xt_msgtype, xstr);
4409 			INUM2STR(msgtype, tstr);
4410 			PR_PROTO("%s:%d: WARNING: msg expected %s(0x%x), "
4411 				"actual %s(0x%x) [msg=%s(0x%x), "
4412 				"msgarg=%s(0x%x)]\n",
4413 				proc, domid, xstr, xp->xt_msgtype,
4414 				tstr, msgtype, mstr, msg, astr, msgarg);
4415 			if (xfunc)
4416 				(*xfunc)(domid, msgtype);
4417 			return (-1);
4418 		}
4419 	}
4420 
4421 	cfunc = xp->xt_trans[(int)o_xstate].t_check;
4422 
4423 	if (cfunc && ((err = (*cfunc)(domid, mtp, xargs)) < 0)) {
4424 		if (o_xstate != IDNXS_PEND) {
4425 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
4426 		}
4427 		if (xfunc)
4428 			(*xfunc)(domid, msgtype);
4429 		return (-1);
4430 	}
4431 
4432 	n_xstate = idn_next_xstate(o_xstate, err, msg);
4433 
4434 	if (n_xstate == IDNXS_NIL) {
4435 		PR_PROTO("%s:%d: WARNING: n_xstate = %s, msg = %s -> NIL "
4436 			"<<<<<<<<<\n",
4437 			proc, domid, idnxs_str[(int)n_xstate], mstr);
4438 		if (xfunc)
4439 			(*xfunc)(domid, msgtype);
4440 		return (-1);
4441 	}
4442 
4443 	if (n_xstate != o_xstate) {
4444 		IDN_XSTATE_TRANSITION(dp, n_xstate);
4445 	}
4446 
4447 	if (err) {
4448 		if ((efunc = xp->xt_trans[(int)o_xstate].t_error) != NULL)
4449 			(*efunc)(domid, mtp, xargs);
4450 	} else if ((afunc = xp->xt_trans[(int)o_xstate].t_action) != NULL) {
4451 		(*afunc)(domid, mtp, xargs);
4452 	}
4453 
4454 	if ((n_xstate == IDNXS_FINAL) && ((ffunc = xp->xt_final) != NULL))
4455 		(*ffunc)(domid);
4456 
4457 	return (0);
4458 }
4459 
4460 /*
4461  * Entered and returns w/DLOCK & SYNC_LOCK held.
4462  */
4463 static int
4464 idn_xstate_transfunc(int domid, void *transarg)
4465 {
4466 	uint_t		msg = (uint_t)(uintptr_t)transarg;
4467 	uint_t		token;
4468 	procname_t	proc = "idn_xstate_transfunc";
4469 
4470 	ASSERT(IDN_SYNC_IS_LOCKED());
4471 
4472 	switch (msg) {
4473 	case IDNP_CON:
4474 		DOMAINSET_ADD(idn.domset.ds_connected, domid);
4475 		break;
4476 
4477 	case IDNP_FIN:
4478 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
4479 		break;
4480 
4481 	default:
4482 		PR_PROTO("%s:%d: ERROR: unknown msg (0x%x) <<<<<<<<\n",
4483 			proc, domid, msg);
4484 		return (0);
4485 	}
4486 
4487 	token = IDN_RETRY_TOKEN(domid, (msg == IDNP_CON) ?
4488 					IDNRETRY_CON : IDNRETRY_FIN);
4489 	if (msg == IDNP_CON)
4490 		idn_retry_submit(idn_retry_con, NULL, token,
4491 			idn_msg_retrytime[(int)IDNRETRY_CON]);
4492 	else
4493 		idn_retry_submit(idn_retry_fin, NULL, token,
4494 			idn_msg_retrytime[(int)IDNRETRY_FIN]);
4495 
4496 	return (1);
4497 }
4498 
4499 /*
4500  * Entered and returns w/DLOCK & SYNC_LOCK held.
4501  */
4502 static void
4503 idn_sync_enter(int domid, idn_synccmd_t cmd,
4504 	domainset_t xset, domainset_t rset,
4505 	int (*transfunc)(), void *transarg)
4506 {
4507 	int		z;
4508 	idn_syncop_t	*sp;
4509 	idn_synczone_t	*zp;
4510 	procname_t	proc = "idn_sync_enter";
4511 
4512 	ASSERT(IDN_SYNC_IS_LOCKED());
4513 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4514 
4515 	z = IDN_SYNC_GETZONE(cmd);
4516 	ASSERT(z >= 0);
4517 	zp = &idn.sync.sz_zone[z];
4518 
4519 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d, xs=0x%x, rx=0x%x, cnt=%d\n",
4520 		proc, domid, idnsync_str[cmd], cmd, z, xset, rset, zp->sc_cnt);
4521 
4522 	sp = &idn_domain[domid].dsync;
4523 
4524 	sp->s_domid = domid;
4525 	sp->s_cmd = cmd;
4526 	sp->s_msg = 0;
4527 	sp->s_set_exp = xset;
4528 	sp->s_set_rdy = rset;
4529 	sp->s_transfunc = transfunc;
4530 	sp->s_transarg = transarg;
4531 	IDN_SYNC_QUERY_INIT(domid);
4532 
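	/*
	 * Push the sync-op onto the front of this zone's singly
	 * linked list of outstanding operations; idn_sync_exit() and
	 * idn_sync_register() walk the list via s_next.
	 */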
4533 	sp->s_next = zp->sc_op;
4534 	zp->sc_op = sp;
4535 	zp->sc_cnt++;
4536 }
4537 
4538 /*
4539  * Entered and returns w/DLOCK & SYNC_LOCK held.
4540  */
4541 void
4542 idn_sync_exit(int domid, idn_synccmd_t cmd)
4543 {
4544 	int		d, z, zone, tot_queries, tot_domains;
4545 	idn_syncop_t	*sp;
4546 	idn_synczone_t	*zp = NULL;
4547 	procname_t	proc = "idn_sync_exit";
4548 
4549 	ASSERT(IDN_SYNC_IS_LOCKED());
4550 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4551 
4552 	sp = &idn_domain[domid].dsync;
4553 
4554 	z = IDN_SYNC_GETZONE(sp->s_cmd);
4555 
4556 	zone = IDN_SYNC_GETZONE(cmd);
4557 
4558 	PR_SYNC("%s:%d: cmd=%s(%d) (z=%d, zone=%d)\n",
4559 		proc, domid, idnsync_str[cmd], cmd, z, zone);
4560 
4561 #ifdef DEBUG
4562 	if (z != -1) {
4563 		tot_queries = tot_domains = 0;
4564 
4565 		for (d = 0; d < MAX_DOMAINS; d++) {
4566 			int	qv;
4567 
4568 			if ((qv = sp->s_query[d]) > 0) {
4569 				tot_queries += qv;
4570 				tot_domains++;
4571 				PR_SYNC("%s:%d: query_count = %d\n",
4572 					proc, domid, qv);
4573 			}
4574 		}
4575 		PR_SYNC("%s:%d: tot_queries = %d, tot_domains = %d\n",
4576 			proc, domid, tot_queries, tot_domains);
4577 	}
4578 #endif /* DEBUG */
4579 
4580 	zp = (z != -1) ? &idn.sync.sz_zone[z] : NULL;
4581 
4582 	if (zp) {
4583 		idn_syncop_t	**spp;
4584 
4585 		for (spp = &zp->sc_op; *spp; spp = &((*spp)->s_next)) {
4586 			if (*spp == sp) {
4587 				*spp = sp->s_next;
4588 				sp->s_next = NULL;
4589 				zp->sc_cnt--;
4590 				break;
4591 			}
4592 		}
4593 	}
4594 
4595 	sp->s_cmd = IDNSYNC_NIL;
4596 
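	/*
	 * Now remove the departing domain from the expected/ready
	 * sets of every other outstanding sync-op (restricted to the
	 * zone of the given command, if one was specified), invoking
	 * an op's transfunc when this domain was the last one it was
	 * still waiting on.
	 */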
4597 	for (z = 0; z < IDN_SYNC_NUMZONE; z++) {
4598 		idn_syncop_t	**spp, **nspp;
4599 
4600 		if ((zone != -1) && (z != zone))
4601 			continue;
4602 
4603 		zp = &idn.sync.sz_zone[z];
4604 
4605 		for (spp = &zp->sc_op; *spp; spp = nspp) {
4606 			sp = *spp;
4607 			nspp = &sp->s_next;
4608 
4609 			if (!DOMAIN_IN_SET(sp->s_set_exp, domid))
4610 				continue;
4611 
4612 			DOMAINSET_DEL(sp->s_set_exp, domid);
4613 			DOMAINSET_DEL(sp->s_set_rdy, domid);
4614 
4615 			if ((sp->s_set_exp == sp->s_set_rdy) &&
4616 					sp->s_transfunc) {
4617 				int	delok;
4618 
4619 				ASSERT(sp->s_domid != domid);
4620 
4621 				PR_SYNC("%s:%d invoking transfunc "
4622 					"for domain %d\n",
4623 					proc, domid, sp->s_domid);
4624 				delok = (*sp->s_transfunc)(sp->s_domid,
4625 							sp->s_transarg);
4626 				if (delok) {
4627 					*spp = sp->s_next;
4628 					sp->s_next = NULL;
4629 					zp->sc_cnt--;
4630 					nspp = spp;
4631 				}
4632 			}
4633 		}
4634 	}
4635 }
4636 
4637 /*
4638  * Entered and returns w/DLOCK & SYNC_LOCK held.
4639  */
4640 static domainset_t
4641 idn_sync_register(int domid, idn_synccmd_t cmd,
4642 		domainset_t ready_set, idn_syncreg_t regtype)
4643 {
4644 	int		z;
4645 	idn_synczone_t	*zp;
4646 	idn_syncop_t	*sp, **spp, **nspp;
4647 	domainset_t	query_set = 0, trans_set;
4648 	procname_t	proc = "idn_sync_register";
4649 
4650 	ASSERT(IDN_SYNC_IS_LOCKED());
4651 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4652 
4653 	if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
4654 		PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
4655 			proc, domid, cmd);
4656 		return (0);
4657 	}
4658 
4659 	/*
4660 	 * Find out what domains are in transition with respect
4661 	 * to given command.  There will be no need to query
4662 	 * these folks.
4663 	 */
4664 	trans_set = IDN_SYNC_GETTRANS(cmd);
4665 
4666 	zp = &idn.sync.sz_zone[z];
4667 
4668 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d, rset=0x%x, "
4669 		"regtype=%s(%d), sc_op=%s\n",
4670 		proc, domid, idnsync_str[cmd], cmd, z, ready_set,
4671 		idnreg_str[regtype], regtype,
4672 		zp->sc_op ? idnsync_str[zp->sc_op->s_cmd] : "NULL");
4673 
4674 	for (spp = &zp->sc_op; *spp; spp = nspp) {
4675 		sp = *spp;
4676 		nspp = &sp->s_next;
4677 
4678 		if (regtype == IDNSYNC_REG_NEW) {
4679 			DOMAINSET_ADD(sp->s_set_exp, domid);
4680 			PR_SYNC("%s:%d: adding new to %d (exp=0x%x)\n",
4681 				proc, domid, sp->s_domid, sp->s_set_exp);
4682 		} else if (regtype == IDNSYNC_REG_QUERY) {
4683 			query_set |= ~sp->s_set_rdy & sp->s_set_exp;
4684 			continue;
4685 		}
4686 
4687 		if (!DOMAIN_IN_SET(sp->s_set_exp, domid))
4688 			continue;
4689 
4690 		if (!DOMAIN_IN_SET(ready_set, sp->s_domid)) {
4691 			/*
4692 			 * Given domid doesn't have a desired
4693 			 * domain in his ready-set.  We'll need
4694 			 * to query him again.
4695 			 */
4696 			DOMAINSET_ADD(query_set, domid);
4697 			continue;
4698 		}
4699 
4700 		/*
4701 		 * If we reach here, then an expected domain
4702 		 * has marked its respective datapath to
4703 		 * sp->s_domid as down (i.e. in his ready_set).
4704 		 */
4705 		DOMAINSET_ADD(sp->s_set_rdy, domid);
4706 
4707 		PR_SYNC("%s:%d: mark READY for domain %d "
4708 			"(r=0x%x, x=0x%x)\n",
4709 			proc, domid, sp->s_domid,
4710 			sp->s_set_rdy, sp->s_set_exp);
4711 
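		/*
		 * Domains still expected but not yet ready will need
		 * to be queried.
		 */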
4712 		query_set |= ~sp->s_set_rdy & sp->s_set_exp;
4713 
4714 		if (sp->s_set_exp == sp->s_set_rdy) {
4715 #ifdef DEBUG
4716 			if (sp->s_msg == 0) {
4717 				sp->s_msg = 1;
4718 				PR_SYNC("%s:%d: >>>>>>>>>>> DOMAIN %d "
4719 					"ALL CHECKED IN (0x%x)\n",
4720 					proc, domid, sp->s_domid,
4721 					sp->s_set_exp);
4722 			}
4723 #endif /* DEBUG */
4724 
4725 			if ((sp->s_domid != domid) && sp->s_transfunc) {
4726 				int	delok;
4727 
4728 				PR_SYNC("%s:%d invoking transfunc "
4729 					"for domain %d\n",
4730 					proc, domid, sp->s_domid);
4731 				delok = (*sp->s_transfunc)(sp->s_domid,
4732 						sp->s_transarg);
4733 				if (delok) {
4734 					*spp = sp->s_next;
4735 					sp->s_next = NULL;
4736 					zp->sc_cnt--;
4737 					nspp = spp;
4738 				}
4739 			}
4740 		}
4741 	}
4742 
4743 	PR_SYNC("%s:%d: trans_set = 0x%x, query_set = 0x%x -> 0x%x\n",
4744 		proc, domid, trans_set, query_set, query_set & ~trans_set);
4745 
4746 	query_set &= ~trans_set;
4747 
4748 	return (query_set);
4749 }
4750 
4751 static void
4752 idn_sync_register_awol(int domid)
4753 {
4754 	int		z;
4755 	idn_synccmd_t	cmd = IDNSYNC_DISCONNECT;
4756 	idn_synczone_t	*zp;
4757 	idn_syncop_t	*sp;
4758 	procname_t	proc = "idn_sync_register_awol";
4759 
4760 	ASSERT(IDN_SYNC_IS_LOCKED());
4761 
4762 	if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
4763 		PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
4764 			proc, domid, cmd);
4765 		return;
4766 	}
4767 
4768 	zp = &idn.sync.sz_zone[z];
4769 
4770 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d (domain %d = AWOL)\n",
4771 		proc, domid, idnsync_str[cmd], cmd, z, domid);
4772 
4773 	for (sp = zp->sc_op; sp; sp = sp->s_next) {
4774 		idn_domain_t	*dp;
4775 
4776 		dp = &idn_domain[sp->s_domid];
4777 		if (dp->dfin == IDNFIN_FORCE_HARD) {
4778 			DOMAINSET_ADD(sp->s_set_rdy, domid);
4779 			PR_SYNC("%s:%d: adding new to %d (rdy=0x%x)\n",
4780 				proc, domid, sp->s_domid, sp->s_set_rdy);
4781 		}
4782 	}
4783 }
4784 
4785 static void
4786 idn_link_established(void *arg)
4787 {
4788 	int	first_link;
4789 	int	domid, masterid;
4790 	uint_t	info = (uint_t)(uintptr_t)arg;
4791 
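	/*
	 * The timeout argument packs a first-link flag in the high
	 * nibble and the domid in the low nibble (hence the 0xf0/0x0f
	 * masks below).
	 */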
4792 	first_link = (int)(info & 0xf0);
4793 	domid = (int)(info & 0x0f);
4794 
4795 	IDN_GLOCK_SHARED();
4796 	masterid = IDN_GET_MASTERID();
4797 	if ((masterid == IDN_NIL_DOMID) ||
4798 		(idn_domain[masterid].dstate != IDNDS_CONNECTED)) {
4799 		/*
4800 		 * No point in doing this unless we're connected
4801 		 * to the master.
4802 		 */
4803 		if ((masterid != IDN_NIL_DOMID) &&
4804 				(idn.state == IDNGS_ONLINE)) {
4805 			/*
4806 			 * As long as we're still online keep
4807 			 * trying.
4808 			 */
4809 			(void) timeout(idn_link_established, arg, 50);
4810 		}
4811 		IDN_GUNLOCK();
4812 		return;
4813 	}
4814 	IDN_GUNLOCK();
4815 
4816 	if (first_link && IDN_SLAB_PREALLOC)
4817 		idn_prealloc_slab(IDN_SLAB_PREALLOC);
4818 
4819 	/*
4820 	 * No guarantee, but it might save a little
4821 	 * time.
4822 	 */
4823 	if (idn_domain[domid].dstate == IDNDS_CONNECTED) {
4824 		/*
4825 		 * Get the remote domain's dname.
4826 		 */
4827 		idn_send_nodename_req(domid);
4828 	}
4829 
4830 	/*
4831 	 * May have had some streams backed up waiting for
4832 	 * this connection.  Prod them.
4833 	 */
4834 	rw_enter(&idn.struprwlock, RW_READER);
4835 	mutex_enter(&idn.sipwenlock);
4836 	idndl_wenable(NULL);
4837 	mutex_exit(&idn.sipwenlock);
4838 	rw_exit(&idn.struprwlock);
4839 }
4840 
4841 /*
4842  * Send the following chunk of data received from above onto
4843  * the IDN wire.  This is raw data as far as the IDN driver
4844  * is concerned.
4845  * Returns:
4846  *	IDNXMIT_LOOP	- Msg handled in loopback and thus
4847  *			  still active (i.e. don't free).
4848  *	IDNXMIT_OKAY	- Data handled (freemsg).
4849  *	IDNXMIT_DROP	- Packet should be dropped.
4850  *	IDNXMIT_RETRY	- Packet should be requeued and retried.
4851  *	IDNXMIT_REQUEUE	- Packet should be requeued, but not
4852  *			  immediately retried.
4853  */
4854 int
4855 idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
4856 						queue_t *wq, mblk_t *mp)
4857 {
4858 	int		pktcnt = 0;
4859 	int		msglen;
4860 	int		rv = IDNXMIT_OKAY;
4861 	int		xfersize = 0;
4862 	caddr_t		iobufp, iodatap;
4863 	uchar_t		*data_rptr;
4864 	int		cpuindex;
4865 	int		serrno;
4866 	int		channel;
4867 	int		retry_reclaim;
4868 	idn_chansvr_t	*csp = NULL;
4869 	uint_t		netports = 0;
4870 	struct idnstr	*stp;
4871 	struct idn	*sip;
4872 	idn_domain_t	*dp;
4873 	struct ether_header	*ehp;
4874 	smr_pkthdr_t	*hdrp;
4875 	idn_msgtype_t	mt;
4876 	procname_t	proc = "idn_send_data";
4877 #ifdef DEBUG
4878 	size_t		orig_msglen = msgsize(mp);
4879 #endif /* DEBUG */
4880 
4881 	ASSERT(DB_TYPE(mp) == M_DATA);
4882 
4883 	mt.mt_mtype = IDNP_DATA;
4884 	mt.mt_atype = 0;
4885 	mt.mt_cookie = 0;
4886 
4887 	channel = (int)dst_netaddr.net.chan;
4888 
4889 	msglen = msgdsize(mp);
4890 	PR_DATA("%s:%d: (netaddr 0x%x) msgsize=%ld, msgdsize=%d\n",
4891 		proc, dst_domid, dst_netaddr.netaddr, msgsize(mp), msglen);
4892 
4893 	ASSERT(wq->q_ptr);
4894 
4895 	stp = (struct idnstr *)wq->q_ptr;
4896 	sip = stp->ss_sip;
4897 	ASSERT(sip);
4898 
4899 	if (msglen < 0) {
4900 		/*
4901 		 * No data to send.  That was easy!
4902 		 */
4903 		PR_DATA("%s:%d: BAD msg length (%d) (netaddr 0x%x)\n",
4904 			proc, dst_domid, msglen, dst_netaddr.netaddr);
4905 		return (IDNXMIT_DROP);
4906 	}
4907 
4908 	ASSERT(RW_READ_HELD(&stp->ss_rwlock));
4909 
4910 	if (dst_domid == IDN_NIL_DOMID) {
4911 		cmn_err(CE_WARN,
4912 			"IDN: 213: no destination specified "
4913 			"(d=%d, c=%d, n=0x%x)",
4914 			dst_domid, dst_netaddr.net.chan,
4915 			dst_netaddr.net.netid);
4916 		IDN_KSTAT_INC(sip, si_nolink);
4917 		IDN_KSTAT_INC(sip, si_macxmt_errors);
4918 		rv = IDNXMIT_DROP;
4919 		goto nocando;
4920 	}
4921 
4922 	ehp = (struct ether_header *)mp->b_rptr;
4923 	PR_DATA("%s:%d: destination channel = %d\n", proc, dst_domid, channel);
4924 
4925 #ifdef DEBUG
4926 	{
4927 		uchar_t	echn;
4928 
4929 		echn = (uchar_t)
4930 			ehp->ether_shost.ether_addr_octet[IDNETHER_CHANNEL];
4931 		ASSERT((uchar_t)channel == echn);
4932 	}
4933 #endif /* DEBUG */
4934 	ASSERT(msglen <= IDN_DATA_SIZE);
4935 
4936 	dp = &idn_domain[dst_domid];
4937 	/*
4938 	 * Get reader lock.  We hold for the duration
4939 	 * of the transfer so that our state doesn't
4940 	 * change during this activity.  Note that since
4941 	 * we grab the reader lock, we can still permit
4942 	 * simultaneous transfers from different threads
4943 	 * to the same domain.
4944 	 * Before we waste a bunch of time gathering locks, etc.,
4945 	 * do an unprotected check to make sure things are
4946 	 * semi-copacetic.  If these values are in flux,
4947 	 * that's okay.
4948 	 */
4949 	if ((dp->dstate != IDNDS_CONNECTED) || (idn.state != IDNGS_ONLINE)) {
4950 		IDN_KSTAT_INC(sip, si_linkdown);
4951 		if (idn.state != IDNGS_ONLINE) {
4952 			rv = IDNXMIT_REQUEUE;
4953 		} else {
4954 			IDN_KSTAT_INC(sip, si_macxmt_errors);
4955 			rv = IDNXMIT_DROP;
4956 		}
4957 		goto nocando;
4958 	}
4959 
4960 	if (idn.chan_servers[channel].ch_send.c_checkin) {
4961 		/*
4962 		 * Gotta bail, somethin' s'up.
4963 		 */
4964 		rv = IDNXMIT_REQUEUE;
4965 		goto nocando;
4966 	}
4967 
4968 	csp = &idn.chan_servers[channel];
4969 	IDN_CHAN_LOCK_SEND(csp);
4970 
4971 	if (dst_netaddr.net.netid == IDN_BROADCAST_ALLNETID) {
4972 		/*
4973 		 * We're doing a broadcast.  Need to set
4974 		 * up IDN netaddr's one at a time.
4975 		 * We set the ethernet destination to the same
4976 		 * instance as the sending address.  The instance
4977 		 * numbers effectively represent subnets.
4978 		 */
4979 		dst_netaddr.net.netid = dp->dnetid;
4980 
4981 		(void) idndl_domain_etheraddr(dst_domid, channel,
4982 						&ehp->ether_dhost);
4983 
4984 		if (dst_domid == idn.localid) {
4985 			mblk_t	*nmp;
4986 			/*
4987 			 * If this is a broadcast and going to
4988 			 * the local domain, then we need to make
4989 			 * a private copy of the message since
4990 			 * the current one will be reused when
4991 			 * transmitting to other domains.
4992 			 */
4993 			PR_DATA("%s:%d: dup broadcast msg for local domain\n",
4994 				proc, dst_domid);
4995 			if ((nmp = copymsg(mp)) == NULL) {
4996 				/*
4997 				 * Couldn't get a duplicate copy.
4998 				 */
4999 				IDN_CHAN_UNLOCK_SEND(csp);
5000 				csp = NULL;
5001 				IDN_KSTAT_INC(sip, si_allocbfail);
5002 				IDN_KSTAT_INC(sip, si_noxmtbuf);
5003 				rv = IDNXMIT_DROP;
5004 				goto nocando;
5005 			}
5006 			mp = nmp;
5007 		}
5008 	}
5009 
5010 	if (dp->dnetid != dst_netaddr.net.netid) {
5011 		PR_DATA("%s:%d: dest netid (0x%x) != expected (0x%x)\n",
5012 			proc, dst_domid, (uint_t)dst_netaddr.net.netid,
5013 			(uint_t)dp->dnetid);
5014 		IDN_CHAN_UNLOCK_SEND(csp);
5015 		csp = NULL;
5016 		IDN_KSTAT_INC(sip, si_nolink);
5017 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5018 		rv = IDNXMIT_DROP;
5019 		goto nocando;
5020 	}
5021 
5022 	if (dst_domid == idn.localid) {
5023 		int	lbrv;
5024 		/*
5025 		 * Sending to our local domain! Loopback.
5026 		 * Note that idn_send_data_loopback returning 0
5027 		 * does not mean the message can now be freed.
5028 		 * We need to return (-1) so that caller doesn't
5029 		 * try to free mblk.
5030 		 */
5031 		IDN_CHAN_UNLOCK_SEND(csp);
5032 		rw_exit(&stp->ss_rwlock);
5033 		lbrv = idn_send_data_loopback(dst_netaddr, wq, mp);
5034 		rw_enter(&stp->ss_rwlock, RW_READER);
5035 		if (lbrv == 0) {
5036 			return (IDNXMIT_LOOP);
5037 		} else {
5038 			IDN_KSTAT_INC(sip, si_macxmt_errors);
5039 			return (IDNXMIT_DROP);
5040 		}
5041 	}
5042 
5043 	if (dp->dstate != IDNDS_CONNECTED) {
5044 		/*
5045 		 * Can't send data unless a link has already been
5046 		 * established with the target domain.  Normally,
5047 		 * a user cannot set the remote netaddr unless a
5048 		 * link has already been established, however it
5049 		 * is possible the connection may have become
5050 		 * disconnected since that time.
5051 		 */
5052 		IDN_CHAN_UNLOCK_SEND(csp);
5053 		csp = NULL;
5054 		IDN_KSTAT_INC(sip, si_linkdown);
5055 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5056 		rv = IDNXMIT_DROP;
5057 		goto nocando;
5058 	}
5059 
5060 	/*
5061 	 * Need to make sure the channel is active and that the
5062 	 * domain to which we're sending is allowed to receive stuff.
5063 	 */
5064 	if (!IDN_CHANNEL_IS_SEND_ACTIVE(csp)) {
5065 		int	not_active;
5066 		/*
5067 		 * See if we can activate channel.
5068 		 */
5069 		IDN_CHAN_UNLOCK_SEND(csp);
5070 		not_active = idn_activate_channel(CHANSET(channel),
5071 							IDNCHAN_OPEN);
5072 		if (!not_active) {
5073 			/*
5074 			 * Only grab the lock for a recheck if we were
5075 			 * able to activate the channel.
5076 			 */
5077 			IDN_CHAN_LOCK_SEND(csp);
5078 		}
5079 		/*
5080 		 * Verify channel still active now that we have the lock.
5081 		 */
5082 		if (not_active || !IDN_CHANNEL_IS_SEND_ACTIVE(csp)) {
5083 			if (!not_active) {
5084 				/*
5085 				 * Only need to drop the lock if it was
5086 				 * acquired while we thought we had
5087 				 * activated the channel.
5088 				 */
5089 				IDN_CHAN_UNLOCK_SEND(csp);
5090 			}
5091 			ASSERT(!IDN_CHAN_SEND_IS_LOCKED(csp));
5092 			/*
5093 			 * Damn!  Must have gone inactive during the window
5094 			 * before we regrabbed the send lock.  Oh well, can't
5095 			 * spend all day doing this, bail out.  Set csp to
5096 			 * NULL to prevent inprogress update at bottom.
5097 			 */
5098 			csp = NULL;
5099 			/*
5100 			 * Channel is not active, should not be used.
5101 			 */
5102 			PR_DATA("%s:%d: dest channel %d NOT ACTIVE\n",
5103 				proc, dst_domid, channel);
5104 			IDN_KSTAT_INC(sip, si_linkdown);
5105 			rv = IDNXMIT_REQUEUE;
5106 			goto nocando;
5107 		}
5108 		ASSERT(IDN_CHAN_SEND_IS_LOCKED(csp));
5109 	}
5110 	/*
5111 	 * If we made it here then the channel is active
5112 	 * Make sure the target domain is registered to receive stuff,
5113 	 * i.e. we're still linked.
5114 	 */
5115 	if (!IDN_CHAN_DOMAIN_IS_REGISTERED(csp, dst_domid)) {
5116 		/*
5117 		 * If domain is not even registered with this channel
5118 		 * then we have no business being here.  Doesn't matter
5119 		 * whether it's active or not.
5120 		 */
5121 		PR_DATA("%s:%d: domain not registered with channel %d\n",
5122 			proc, dst_domid, channel);
5123 		/*
5124 		 * Set csp to NULL to prevent in-progress update below.
5125 		 */
5126 		IDN_CHAN_UNLOCK_SEND(csp);
5127 		csp = NULL;
5128 		IDN_KSTAT_INC(sip, si_linkdown);
5129 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5130 		rv = IDNXMIT_DROP;
5131 		goto nocando;
5132 	}
5133 
5134 	IDN_CHAN_SEND_INPROGRESS(csp);
5135 	IDN_CHAN_UNLOCK_SEND(csp);
5136 
5137 	/*
5138 	 * Find a target cpu to send interrupt to if
5139 	 * it becomes necessary (i.e. remote channel
5140 	 * server is idle).
5141 	 */
5142 	cpuindex = dp->dcpuindex;
5143 
5144 	/*
5145 	 * dcpuindex is atomically incremented, but other than
5146 	 * that is not well protected and that's okay.  The
5147 	 * intention is to simply spread around the interrupts
5148 	 * at the destination domain, however we don't have to
5149 	 * be anal about it.  If we hit the same cpu multiple times
5150 	 * in a row that's okay, it will only be for a very short
5151 	 * period anyway before the cpuindex is incremented
5152 	 * to the next cpu.
5153 	 */
5154 	if (cpuindex < NCPU) {
5155 		ATOMIC_INC(dp->dcpuindex);
5156 	}
5157 	if (dp->dcpuindex >= NCPU)
5158 		dp->dcpuindex = 0;
5159 
5160 	IDN_ASSIGN_DCPU(dp, cpuindex);
5161 
5162 #ifdef XXX_DLPI_UNFRIENDLY
5163 	{
5164 		ushort_t	dstport = (ushort_t)dp->dcpu;
5165 
5166 		/*
5167 		 * XXX
5168 		 * This is not DLPI friendly, but we need some way
5169 		 * of distributing our XDC interrupts to the cpus
5170 		 * on the remote domain in a relatively random fashion
5171 		 * while trying to remain constant for an individual
5172 		 * network connection.  Don't want the target network
5173 		 * appl pinging around cpus thrashing the caches.
5174 		 * So, we'll pick target cpus based on the destination
5175 		 * TCP/IP port (socket).  The (simple) alternative to
5176 		 * this is to simply send all messages destined for
5177 		 * particular domain to the same cpu (dcpu), but
5178 		 * will lower our bandwidth and introduce a lot of
5179 		 * contention on that target cpu.
5180 		 */
5181 		if (ehp->ether_type == ETHERTYPE_IP) {
5182 			ipha_t	*ipha;
5183 			uchar_t	*dstporta;
5184 			int	hdr_length;
5185 			mblk_t	*nmp = mp;
5186 			uchar_t	*rptr = mp->b_rptr +
5187 					sizeof (struct ether_header);
5188 			if (nmp->b_wptr <= rptr) {
5189 				/*
5190 				 * Only the ethernet header was contained
5191 				 * in the first block.  Check for the
5192 				 * next packet.
5193 				 */
5194 				if ((nmp = mp->b_cont) != NULL)
5195 					rptr = nmp->b_rptr;
5196 			}
5197 			/*
5198 			 * If we still haven't found the IP header packet
5199 			 * then don't bother.  Can't search forever.
5200 			 */
5201 			if (nmp &&
5202 			    ((nmp->b_wptr - rptr) >= IP_SIMPLE_HDR_LENGTH)) {
5203 				ipha = (ipha_t *)ALIGN32(rptr);
5204 
5205 				ASSERT(DB_TYPE(mp) == M_DATA);
5206 				hdr_length = IPH_HDR_LENGTH(ipha);
5207 
5208 				switch (ipha->ipha_protocol) {
5209 				case IPPROTO_UDP:
5210 				case IPPROTO_TCP:
5211 					/*
5212 					 * TCP/UDP Protocol Header (1st word)
5213 					 * 0	    15,16	31
5214 					 * -----------------------
5215 					 * | src port | dst port |
5216 					 * -----------------------
5217 					 */
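					/*
					 * netports grabs the whole
					 * src/dst port word for the
					 * packet header; dstport (the
					 * 16-bit destination port) is
					 * what IDN_ASSIGN_DCPU below
					 * keys off of.
					 */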
5218 					dstporta = (uchar_t *)ipha + hdr_length;
5219 					netports = *(uint_t *)dstporta;
5220 					dstporta += 2;
5221 					dstport  = *(ushort_t *)dstporta;
5222 					break;
5223 				default:
5224 					break;
5225 				}
5226 			}
5227 
5228 		}
5229 		IDN_ASSIGN_DCPU(dp, dstport);
5230 
5231 		PR_DATA("%s:%d: (dstport %d) assigned %d\n",
5232 			proc, dst_domid, (int)dstport, dp->dcpu);
5233 	}
5234 #endif /* XXX_DLPI_UNFRIENDLY */
5235 
5236 	data_rptr = mp->b_rptr;
5237 
5238 	ASSERT(dp->dcpu != IDN_NIL_DCPU);
5239 
5240 	ASSERT(idn_domain[dst_domid].dmbox.m_send);
5241 
5242 	retry_reclaim = 1;
5243 retry:
5244 	if ((dp->dio >= IDN_RECLAIM_MIN) || dp->diowanted) {
5245 		int	reclaim_req;
5246 		/*
5247 		 * Reclaim however many outstanding buffers
5248 		 * there are up to IDN_RECLAIM_MAX if it's set.
5249 		 */
5250 		reclaim_req = dp->diowanted ? -1 : IDN_RECLAIM_MAX ?
5251 					MIN(dp->dio, IDN_RECLAIM_MAX) :
5252 					dp->dio;
5253 		(void) idn_reclaim_mboxdata(dst_domid, channel,
5254 					reclaim_req);
5255 	}
5256 
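	/*
	 * dp->dio tracks the number of SMR I/O buffers still
	 * outstanding to this domain (across all channels).  Once it
	 * hits IDN_WINDOW_EMAX we must stop transmitting until the
	 * remote domain consumes some and they can be reclaimed.
	 */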
5257 	if (dp->dio >= IDN_WINDOW_EMAX) {
5258 
5259 		if (lock_try(&dp->diocheck)) {
5260 			IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
5261 					idn_msg_waittime[IDNP_DATA],
5262 					&mt.mt_cookie);
5263 			/*
5264 			 * We have exceeded the minimum window for
5265 			 * outstanding I/O buffers to this domain.
5266 			 * Need to start the MSG timer to check for
5267 			 * possible response from remote domain.
5268 			 * The remote domain may be hung.  Send a
5269 			 * wakeup!  Specify all channels for given
5270 			 * domain since we don't know precisely which
5271 			 * is backed up (dio is global).
5272 			 */
5273 			IDNXDC(dst_domid, &mt,
5274 				(uint_t)dst_netaddr.net.chan, 0, 0, 0);
5275 		}
5276 
5277 		/*
5278 		 * Yikes!  We have exceeded the maximum window
5279 		 * which means no more packets going to remote
5280 		 * domain until he frees some up.
5281 		 */
5282 		IDN_KSTAT_INC(sip, si_txmax);
5283 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5284 		rv = IDNXMIT_DROP;
5285 		goto nocando;
5286 	}
5287 	/*
5288 	 * Allocate a SMR I/O buffer and send it.
5289 	 */
5290 
5291 	if (msglen == 0) {
5292 		/*
5293 		 * A zero length message is effectively a signal
5294 		 * to just send an interrupt to the remote domain.
5295 		 */
5296 		IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
5297 				idn_msg_waittime[IDNP_DATA],
5298 				&mt.mt_cookie);
5299 		IDNXDC(dst_domid, &mt,
5300 			(uint_t)dst_netaddr.net.chan, 0, 0, 0);
5301 	}
5302 	for (; (msglen > 0) && mp; msglen -= xfersize) {
5303 		int		xrv;
5304 		smr_offset_t	bufoffset;
5305 #ifdef DEBUG
5306 		int		n_xfersize;
5307 #endif /* DEBUG */
5308 
5309 		ASSERT(msglen <= IDN_DATA_SIZE);
5310 		xfersize = msglen;
5311 
5312 		serrno = smr_buf_alloc(dst_domid, xfersize, &iobufp);
5313 		if (serrno) {
5314 			PR_DATA("%s:%d: failed to alloc SMR I/O buffer "
5315 				"(serrno = %d)\n",
5316 				proc, dst_domid, serrno);
5317 			/*
5318 			 * Failure is either due to a timeout waiting
5319 			 * for the master to give us a slab, OR the
5320 			 * local domain exhausted its slab quota!
5321 			 * In either case we'll have to bail from
5322 			 * here and let higher layers decide what
5323 			 * to do.
5324 			 * We also could have had locking problems.
5325 			 * A negative serrno indicates we lost the lock
5326 			 * on dst_domid, so no need in dropping lock.
5327 			 */
5328 
5329 			if (lock_try(&dp->diowanted) && retry_reclaim) {
5330 				/*
5331 				 * We were the first to acquire the
5332 				 * lock indicating that it wasn't
5333 				 * set on entry to idn_send_data.
5334 				 * So, let's go back and see if we
5335 				 * can't reclaim some buffers and
5336 				 * try again.
5337 				 * It's very likely diowanted will be
5338 				 * enough to prevent us from looping
5339 				 * on retrying here, however to protect
5340 				 * against the small window where a
5341 				 * race condition might exist, we use
5342 				 * the retry_reclaim flag so that we
5343 				 * don't retry more than once.
5344 				 */
5345 				retry_reclaim = 0;
5346 				goto retry;
5347 			}
5348 
5349 			rv = (serrno > 0) ? serrno : -serrno;
5350 			IDN_KSTAT_INC(sip, si_notbufs);
5351 			IDN_KSTAT_INC(sip, si_noxmtbuf);	/* MIB II */
5352 			switch (rv) {
5353 			case ENOMEM:
5354 			case EBUSY:
5355 			case ENOLCK:
5356 			case ETIMEDOUT:
5357 			case EDQUOT:
5358 				/*
5359 				 * These are all transient conditions
5360 				 * which should be recoverable over
5361 				 * time.
5362 				 */
5363 				rv = IDNXMIT_REQUEUE;
5364 				break;
5365 
5366 			default:
5367 				rv = IDNXMIT_DROP;
5368 				break;
5369 			}
5370 			goto nocando;
5371 		}
5372 
5373 		lock_clear(&dp->diowanted);
5374 
5375 		hdrp = IDN_BUF2HDR(iobufp);
5376 		bufoffset = (smr_offset_t)IDN_ALIGNPTR(sizeof (smr_pkthdr_t),
5377 							data_rptr);
5378 		/*
5379 		 * If the alignment of bufoffset took us past the
5380 		 * length of a smr_pkthdr_t then we may need to
5381 		 * lower xfersize since it was calculated based on
5382 		 * a perfect alignment.  However, if we're in DLPI
5383 		 * mode then this shouldn't be necessary since the length
5384 		 * of the incoming packet (mblk) should have already
5385 		 * taken into consideration this possible adjustment.
5386 		 */
5387 #ifdef DEBUG
5388 		if (bufoffset != sizeof (smr_pkthdr_t))
5389 			PR_DATA("%s:%d: offset ALIGNMENT (%lu -> %u) "
5390 				"(data_rptr = %p)\n",
5391 				proc, dst_domid, sizeof (smr_pkthdr_t),
5392 				bufoffset, data_rptr);
5393 
5394 		n_xfersize = MIN(xfersize, (IDN_SMR_BUFSIZE - bufoffset));
5395 		if (xfersize != n_xfersize) {
5396 			PR_DATA("%s:%d: xfersize ADJUST (%d -> %d)\n",
5397 				proc, dst_domid, xfersize, n_xfersize);
5398 			cmn_err(CE_WARN, "%s: ERROR (xfersize = %d, > "
5399 					"bufsize(%d)-bufoffset(%d) = %d)",
5400 					proc, xfersize, IDN_SMR_BUFSIZE,
5401 					bufoffset,
5402 					IDN_SMR_BUFSIZE - bufoffset);
5403 		}
5404 #endif /* DEBUG */
5405 		xfersize = MIN(xfersize, (int)(IDN_SMR_BUFSIZE - bufoffset));
5406 
5407 		iodatap = IDN_BUF2DATA(iobufp, bufoffset);
5408 		mp = idn_fill_buffer(iodatap, xfersize, mp, &data_rptr);
5409 
5410 		hdrp->b_netaddr  = dst_netaddr.netaddr;
5411 		hdrp->b_netports = netports;
5412 		hdrp->b_offset   = bufoffset;
5413 		hdrp->b_length   = xfersize;
5414 		hdrp->b_next	 = IDN_NIL_SMROFFSET;
5415 		hdrp->b_rawio	 = 0;
5416 		hdrp->b_cksum    = IDN_CKSUM_PKT(hdrp);
5417 
5418 		xrv = idn_send_mboxdata(dst_domid, sip, channel, iobufp);
5419 		if (xrv) {
5420 			/*
5421 			 * Reclaim packet.
5422 			 * Return error on this packet so it can be retried
5423 			 * (putbq).  Note that it should be safe to assume
5424 			 * that this for-loop is only executed once when in
5425 			 * DLPI mode and so no need to worry about fractured
5426 			 * mblk packet.
5427 			 */
5428 			PR_DATA("%s:%d: DATA XFER to chan %d FAILED "
5429 				"(ret=%d)\n",
5430 				proc, dst_domid, channel, xrv);
5431 			smr_buf_free(dst_domid, iobufp, xfersize);
5432 
5433 			PR_DATA("%s:%d: (line %d) dec(dio) -> %d\n",
5434 				proc, dst_domid, __LINE__, dp->dio);
5435 
5436 			rv = IDNXMIT_DROP;
5437 			IDN_KSTAT_INC(sip, si_macxmt_errors);
5438 			goto nocando;
5439 		} else {
5440 			pktcnt++;
5441 			/*
5442 			 * Packet will get freed on a subsequent send
5443 			 * when we reclaim buffers that the receiver
5444 			 * has finished consuming.
5445 			 */
5446 		}
5447 	}
5448 
5449 #ifdef DEBUG
5450 	if (pktcnt > 1)
5451 		cmn_err(CE_WARN,
5452 			"%s: ERROR: sent multi-pkts (%d), len = %ld",
5453 			proc, pktcnt, orig_msglen);
5454 #endif /* DEBUG */
5455 
5456 	PR_DATA("%s:%d: SENT %d packets (%d @ 0x%x)\n",
5457 		proc, dst_domid, pktcnt, dst_netaddr.net.chan,
5458 		dst_netaddr.net.netid);
5459 
5460 	IDN_CHAN_LOCK_SEND(csp);
5461 	IDN_CHAN_SEND_DONE(csp);
5462 	IDN_CHAN_UNLOCK_SEND(csp);
5463 
5464 	return (IDNXMIT_OKAY);
5465 
5466 nocando:
5467 
5468 	if (csp) {
5469 		IDN_CHAN_LOCK_SEND(csp);
5470 		IDN_CHAN_SEND_DONE(csp);
5471 		IDN_CHAN_UNLOCK_SEND(csp);
5472 	}
5473 
5474 	if (rv == IDNXMIT_REQUEUE) {
5475 		/*
5476 		 * Better kick off monitor to check when
5477 		 * it's ready to reenable the queues for
5478 		 * this channel.
5479 		 */
5480 		idn_xmit_monitor_kickoff(channel);
5481 	}
5482 
5483 	return (rv);
5484 }
5485 
5486 /*
5487  * Function to support local loopback testing of IDN driver.
5488  * Primarily geared towards measuring stream-head and IDN driver
5489  * overhead with respect to data messages.  Setting idn_strhead_only
5490  * allows the routine to focus on stream-head overhead by simply putting
5491  * the message straight to the 'next' queue of the destination
5492  * read-queue.  Current implementation puts the message directly to
5493  * the read-queue thus sending the message right back to the IDN driver
5494  * as though the data came in off the wire.  No need to worry about
5495  * any IDN layers attempting to ack data as that's normally handled
5496  * by idnh_recv_data.
5497  *
5498  * dst_netaddr = destination port-n-addr on local domain.
5499  * wq          = write queue from whence message came.
5500  * mp          = the (data-only) message.
5501  *
5502  * Returns 0		Indicates data handled.
5503  *	   errno	EAGAIN indicates data can be retried.
5504  *			Other errno's indicate failure to handle.
5505  */
5506 static int
5507 idn_send_data_loopback(idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
5508 {
5509 	register struct idnstr	*stp;
5510 	struct idn	*sip;
5511 	int		rv = 0;
5512 	procname_t	proc = "idn_send_data_loopback";
5513 
5514 	if (dst_netaddr.net.netid != idn_domain[idn.localid].dnetid) {
5515 		PR_DATA("%s: dst_netaddr.net.netid 0x%x != local 0x%x\n",
5516 			proc, dst_netaddr.net.netid,
5517 			idn_domain[idn.localid].dnetid);
5518 		rv = EADDRNOTAVAIL;
5519 		goto done;
5520 	}
5521 	stp = (struct idnstr *)wq->q_ptr;
5522 	if (!stp || !stp->ss_rq) {
5523 		rv = EDESTADDRREQ;
5524 		goto done;
5525 	}
5526 	sip = stp->ss_sip;
5527 
5528 	idndl_read(sip, mp);
5529 	rv = 0;
5530 
5531 done:
5532 	return (rv);
5533 }
5534 
5535 /*
5536  * Fill bufp with as much data as possible from the message pointed
5537  * to by mp up to size bytes.
5538  * Save our current read pointer in the variable parameter (data_rptrp)
5539  * so we know where to start on the next go around.  Don't want to
5540  * bump the actual b_rptr in the mblk because the mblk may need to
5541  * be reused, e.g. broadcast.
5542  * Return the mblk pointer to the position we had to stop.
5543  */
5544 static mblk_t *
5545 idn_fill_buffer(caddr_t bufp, int size, mblk_t *mp, uchar_t **data_rptrp)
5546 {
5547 	int	copysize;
5548 
5549 	ASSERT(bufp && size);
5550 
5551 	if (mp == NULL)
5552 		return (NULL);
5553 
5554 	while ((size > 0) && mp) {
5555 
5556 		copysize = MIN(mp->b_wptr - (*data_rptrp), size);
5557 
5558 		if (copysize > 0) {
5559 			/*
5560 			 * If there's data to copy, do it.
5561 			 */
5562 			bcopy((*data_rptrp), bufp, copysize);
5563 			(*data_rptrp) += copysize;
5564 			bufp += copysize;
5565 			size -= copysize;
5566 		}
5567 		if (mp->b_wptr <= (*data_rptrp)) {
5568 			/*
5569 			 * If we emptied the mblk, then
5570 			 * move on to the next one.
5571 			 */
5572 			for (mp = mp->b_cont;
5573 			    mp && (mp->b_datap->db_type != M_DATA);
5574 			    mp = mp->b_cont)
5575 				;
5576 			if (mp)
5577 				*data_rptrp = mp->b_rptr;
5578 		}
5579 	}
5580 	return (mp);
5581 }
5582 
5583 /*
5584  * Messages received here do NOT arrive on a stream, but are
5585  * instead handled via the idn_protocol_servers.  This routine
5586  * is effectively the job processor for the protocol servers.
5587  */
5588 static void
5589 idn_recv_proto(idn_protomsg_t *hp)
5590 {
5591 	int		domid, cpuid;
5592 	int		sync_lock = 0;
5593 	idn_domain_t	*dp;
5594 	register uint_t	mtype;
5595 	register uint_t	msgtype, acktype;
5596 	idn_msgtype_t	mt;
5597 	ushort_t	dcookie, tcookie;
5598 	procname_t	proc = "idn_recv_proto";
5599 
5600 
5601 	if (idn.state == IDNGS_IGNORE) {
5602 		/*
5603 		 * Fault injection to simulate non-responsive domain.
5604 		 */
5605 		return;
5606 	}
5607 
5608 	domid   = hp->m_domid;
5609 	cpuid   = hp->m_cpuid;
5610 	msgtype = hp->m_msgtype;
5611 	acktype = hp->m_acktype;
5612 	dcookie = IDN_DCOOKIE(hp->m_cookie);
5613 	tcookie = IDN_TCOOKIE(hp->m_cookie);
5614 	/*
5615 	 * msgtype =	Is the type of message we received,
5616 	 *		e.g. nego, ack, nego+ack, etc.
5617 	 *
5618 	 * acktype =	If we received a pure ack or nack
5619 	 *		then this variable is set to the
5620 	 *		type of message that was ack/nack'd.
5621 	 */
5622 	if ((mtype = msgtype & IDNP_MSGTYPE_MASK) == 0) {
5623 		/*
5624 		 * Received a pure ack/nack.
5625 		 */
5626 		mtype = acktype & IDNP_MSGTYPE_MASK;
5627 	}
5628 
5629 	if (!VALID_MSGTYPE(mtype)) {
5630 		PR_PROTO("%s:%d: ERROR: invalid message type (0x%x)\n",
5631 			proc, domid, mtype);
5632 		return;
5633 	}
5634 	if (!VALID_CPUID(cpuid)) {
5635 		PR_PROTO("%s:%d: ERROR: invalid cpuid (%d)\n",
5636 			proc, domid, cpuid);
5637 		return;
5638 	}
5639 
5640 	/*
5641 	 * No pure data packets should reach this level.
5642 	 * Data+ack messages will reach here, but only
5643 	 * for the purpose of stopping the timer which
5644 	 * happens by default when this routine is called.
5645 	 */
5646 	ASSERT(msgtype != IDNP_DATA);
5647 
5648 	/*
5649 	 * We should never receive a request from ourself,
5650 	 * except for commands in the case of broadcasts!
5651 	 */
5652 	if ((domid == idn.localid) && (mtype != IDNP_CMD)) {
5653 		char	str[15];
5654 
5655 		inum2str(hp->m_msgtype, str);
5656 
5657 		cmn_err(CE_WARN,
5658 			"IDN: 214: received message (%s[0x%x]) from self "
5659 			"(domid %d)",
5660 			str, hp->m_msgtype, domid);
5661 		return;
5662 	}
5663 
5664 	IDN_SYNC_LOCK();
5665 	/*
5666 	 * Set a flag indicating whether we really need
5667 	 * SYNC-LOCK.  We'll drop it in a little bit if
5668 	 * we really don't need it.
5669 	 */
5670 	switch (mtype) {
5671 	case IDNP_CON:
5672 	case IDNP_FIN:
5673 	case IDNP_NEGO:
5674 		sync_lock = 1;
5675 		break;
5676 
5677 	default:
5678 		break;
5679 	}
5680 
5681 	dp = &idn_domain[domid];
5682 	IDN_DLOCK_EXCL(domid);
5683 
5684 	/*
5685 	 * The only messages for which we do _not_ check the cookie are:
5686 	 *	nego
5687 	 *	nego+ack
5688 	 *	fin	 - if received cookie is 0.
5689 	 *	fin+ack	 - if received cookie is 0.
5690 	 *	ack/fin	 - if received cookie is 0.
5691 	 *	nack/fin - if received cookie is 0.
5692 	 */
5693 	if (((msgtype & IDNP_MSGTYPE_MASK) != IDNP_NEGO) &&
5694 			((mtype != IDNP_FIN) ||
5695 			(dcookie && dp->dcookie_recv))) {
5696 		if (dp->dcookie_recv != dcookie) {
5697 			dp->dcookie_errcnt++;
5698 			if (dp->dcookie_err == 0) {
5699 				/*
5700 				 * Set cookie error to prevent a
5701 				 * possible flood of bogus cookies
5702 				 * and thus error messages.
5703 				 */
5704 				dp->dcookie_err = 1;
5705 				cmn_err(CE_WARN,
5706 					"IDN: 215: invalid cookie (0x%x) "
5707 					"for message (0x%x) from domain %d",
5708 					dcookie, hp->m_msgtype, domid);
5709 
5710 				PR_PROTO("%s:%d: received cookie (0x%x), "
5711 					"expected (0x%x) [errcnt = %d]\n",
5712 					proc, domid, dcookie,
5713 					dp->dcookie_recv, dp->dcookie_errcnt);
5714 			}
5715 			IDN_DUNLOCK(domid);
5716 			IDN_SYNC_UNLOCK();
5717 			return;
5718 		}
5719 	}
5720 	dp->dcookie_err = 0;
5721 	IDN_GLOCK_EXCL();
5722 
5723 	idn_clear_awol(domid);
5724 
5725 	IDN_GUNLOCK();
5726 	if (!sync_lock)		/* really don't need SYNC-LOCK past here */
5727 		IDN_SYNC_UNLOCK();
5728 
5729 	/*
5730 	 * Stop any timers that may have been outstanding for
5731 	 * this domain, for this particular message type.
5732 	 * Note that CFG timers are directly managed by
5733 	 * config recv/send code.
5734 	 */
5735 	if ((mtype != IDNP_CFG) && (msgtype & IDNP_ACKNACK_MASK) && tcookie) {
5736 		IDN_MSGTIMER_STOP(domid, mtype, tcookie);
5737 	}
5738 
5739 	/*
5740 	 * Keep track of the last cpu to send us a message.
5741 	 * If the domain has not yet been assigned, we'll need
5742 	 * this cpuid in order to send back a response.
5743 	 */
5744 	dp->dcpu_last = cpuid;
5745 
5746 	mt.mt_mtype = (ushort_t)msgtype;
5747 	mt.mt_atype = (ushort_t)acktype;
5748 	mt.mt_cookie = tcookie;
5749 
5750 	switch (mtype) {
5751 	case IDNP_NEGO:
5752 		idn_recv_nego(domid, &mt, hp->m_xargs, dcookie);
5753 		break;
5754 
5755 	case IDNP_CFG:
5756 		idn_recv_config(domid, &mt, hp->m_xargs);
5757 		break;
5758 
5759 	case IDNP_CON:
5760 		idn_recv_con(domid, &mt, hp->m_xargs);
5761 		break;
5762 
5763 	case IDNP_FIN:
5764 		idn_recv_fin(domid, &mt, hp->m_xargs);
5765 		break;
5766 
5767 	case IDNP_CMD:
5768 		idn_recv_cmd(domid, &mt, hp->m_xargs);
5769 		break;
5770 
5771 	case IDNP_DATA:
5772 		ASSERT(msgtype & IDNP_ACKNACK_MASK);
5773 		/*
5774 		 * When doing the fast track we simply process
5775 		 * possible nack error conditions.  The actual
5776 		 * processing of the SMR data buffer is taken
5777 		 * care of in idnh_recv_dataack.  When NOT doing
5778 		 * the fast track, we do all the processing here
5779 		 * in the protocol server.
5780 		 */
5781 		idn_recv_data(domid, &mt, hp->m_xargs);
5782 		break;
5783 
5784 	default:
5785 		/*
5786 		 * Should be receiving 0 inum and 0 acknack.
5787 		 */
5788 #ifdef DEBUG
5789 		cmn_err(CE_PANIC,
5790 #else /* DEBUG */
5791 		cmn_err(CE_WARN,
5792 #endif /* DEBUG */
5793 			"IDN: 216: (0x%x)msgtype/(0x%x)acktype rcvd from "
5794 			"domain %d", msgtype, acktype, domid);
5795 		break;
5796 	}
5797 
5798 	IDN_DUNLOCK(domid);
5799 	/*
5800 	 * All receiving routines are responsible for dropping drwlock.
5801 	 */
5802 
5803 	if (sync_lock)
5804 		IDN_SYNC_UNLOCK();
5805 }
5806 
5807 /*
5808  * Once the CONFIG state is hit we immediately blast out all
5809  * of our config info.  This guarantees that the CONFIG state
5810  * effectively signifies that the sender has sent _all_ of
5811  * their config info.
5812  */
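/*
 * The exchange is phase-driven: each call transmits one CFG message
 * carrying up to three (parameter, value) pairs for the given phase.
 * The peer's ACK (handled in idn_recv_config) echoes the phase back
 * and triggers a call for phase+1, until idn_send_master_config or
 * idn_send_slave_config reports that nothing is left to send.
 */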
5813 static void
5814 idn_send_config(int domid, int phase)
5815 {
5816 	idn_domain_t	*dp;
5817 	int		rv;
5818 	clock_t		cfg_waittime = idn_msg_waittime[IDNP_CFG];
5819 	procname_t	proc = "idn_send_config";
5820 
5821 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5822 
5823 	dp = &idn_domain[domid];
5824 
5825 	ASSERT(dp->dstate == IDNDS_CONFIG);
5826 
5827 	if (phase == 1) {
5828 		/*
5829 		 * Reset stuff in dtmp to 0:
5830 		 *	dcfgphase
5831 		 *	dcksum
5832 		 *	dncfgitems
5833 		 *	dmaxnets
5834 		 *	dmboxpernet
5835 		 */
5836 		dp->dtmp = 0;
5837 	}
5838 
5839 	if (dp->dcfgsnddone) {
5840 		if (!dp->dcfgrcvdone) {
5841 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5842 					cfg_waittime, NULL);
5843 		}
5844 		return;
5845 	}
5846 
5847 	IDN_DLOCK_SHARED(idn.localid);
5848 
5849 	PR_PROTO("%s:%d: sending %s config (phase %d)\n",
5850 		proc, domid,
5851 		idn_domain[idn.localid].dvote.v.master ? "MASTER" : "SLAVE",
5852 		phase);
5853 
5854 	if (idn_domain[idn.localid].dvote.v.master)
5855 		rv = idn_send_master_config(domid, phase);
5856 	else
5857 		rv = idn_send_slave_config(domid, phase);
5858 
5859 	IDN_DUNLOCK(idn.localid);
5860 
5861 	if (rv >= 0) {
5862 
5863 		if (rv == 1) {
5864 			dp->dcfgsnddone = 1;
5865 			PR_PROTO("%s:%d: SEND config DONE\n", proc, domid);
5866 			if (!dp->dcfgrcvdone) {
5867 				IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5868 						cfg_waittime, NULL);
5869 			}
5870 		} else {
5871 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5872 						cfg_waittime, NULL);
5873 		}
5874 	}
5875 }
5876 
5877 /*
5878  * Clear out the mailbox table.
5879  * NOTE: This routine touches the SMR.
5880  */
5881 static void
5882 idn_reset_mboxtbl(idn_mboxtbl_t *mtp)
5883 {
5884 	int		qi;
5885 	idn_mboxmsg_t	*mp = &mtp->mt_queue[0];
5886 
5887 	qi = 0;
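	/*
	 * IDN_MMBOXINDEX_INC() is expected to wrap qi back to zero
	 * once it passes the last queue entry; that wrap is what
	 * terminates the do/while below after one full pass over
	 * the mailbox queue.
	 */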
5888 	do {
5889 		mp[qi].ms_bframe = 0;
5890 		mp[qi].ms_owner = 0;
5891 		mp[qi].ms_flag = 0;
5892 		IDN_MMBOXINDEX_INC(qi);
5893 	} while (qi);
5894 }
5895 
5896 static int
5897 idn_get_mbox_config(int domid, int *mindex,
5898 		smr_offset_t *mtable, smr_offset_t *mdomain)
5899 {
5900 	idn_domain_t	*dp, *ldp;
5901 
5902 	dp = &idn_domain[domid];
5903 	ldp = &idn_domain[idn.localid];
5904 
5905 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5906 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
5907 	ASSERT(IDN_GET_MASTERID() != IDN_NIL_DOMID);
5908 
5909 	/*
5910 	 * Get SMR offset of receive mailbox assigned
5911 	 * to respective domain.  If I'm a slave then
5912 	 * my dmbox.m_tbl will not have been assigned yet.
5913 	 * Instead of sending the actual offset I send
5914 	 * the master his assigned index.  Since the
5915 	 * master knows what offset it will assign to
5916 	 * me he can determine his assigned (recv) mailbox
5917 	 * based on the offset and given index.  The local
5918 	 * domain can also use this information once the
5919 	 * dmbox.m_tbl is received to properly assign the
5920 	 * correct mbox offset to the master.
5921 	 */
5922 	if (ldp->dmbox.m_tbl == NULL) {
5923 		/*
5924 		 * Local domain has not yet been assigned a
5925 		 * (recv) mailbox table.  This must be the
5926 		 * initial connection of this domain.
5927 		 */
5928 		ASSERT(dp->dvote.v.master && !ldp->dvote.v.master);
5929 		ASSERT(mindex);
5930 		*mindex = domid;
5931 	} else {
5932 		idn_mboxtbl_t	*mtp;
5933 
5934 		mtp = IDN_MBOXTBL_PTR(ldp->dmbox.m_tbl, domid);
5935 
5936 		ASSERT(mdomain);
5937 		*mdomain = IDN_ADDR2OFFSET(mtp);
5938 
5939 		if (ldp->dvote.v.master) {
5940 			/*
5941 			 * Need to calculate mailbox table to
5942 			 * assign to the given domain.  Since
5943 			 * I'm the master his mailbox is in
5944 			 * the (all-domains) mailbox table.
5945 			 */
5946 			mtp = IDN_MBOXAREA_BASE(idn.mboxarea, domid);
5947 			ASSERT(mtable);
5948 			*mtable = IDN_ADDR2OFFSET(mtp);
5949 
5950 			dp->dmbox.m_tbl = mtp;
5951 		}
5952 	}
5953 
5954 	return (0);
5955 }
5956 
5957 /*
5958  * RETURNS:
5959  *	1	Unexpected/unnecessary phase.
5960  *	0	Successfully handled, timer needed.
5961  */
5962 static int
5963 idn_send_master_config(int domid, int phase)
5964 {
5965 	idn_cfgsubtype_t	cfg_subtype;
5966 	int		rv = 0;
5967 	idn_domain_t	*dp, *ldp;
5968 	idn_msgtype_t	mt;
5969 	int		nmcadr;
5970 	uint_t		barpfn, larpfn;
5971 	uint_t		cpus_u32, cpus_l32;
5972 	uint_t		mcadr[3];
5973 	smr_offset_t	mbox_table, mbox_domain;
5974 	register int	b, p, m;
5975 	procname_t	proc = "idn_send_master_config";
5976 
5977 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5978 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
5979 
5980 	dp = &idn_domain[domid];
5981 	ldp = &idn_domain[idn.localid];
5982 
5983 	ASSERT(dp->dstate == IDNDS_CONFIG);
5984 	ASSERT(dp->dvote.v.master == 0);
5985 	ASSERT(ldp->dvote.v.master == 1);
5986 
5987 	mt.mt_mtype = IDNP_CFG;
5988 	mt.mt_atype = 0;
5989 	mt.mt_cookie = 0;
5990 	m = 0;
5991 	mcadr[0] = mcadr[1] = mcadr[2] = 0;
5992 	cfg_subtype.val = 0;
5993 
5994 	switch (phase) {
5995 
5996 	case 1:
5997 		mbox_table = mbox_domain = IDN_NIL_SMROFFSET;
5998 		idn_get_mbox_config(domid, NULL, &mbox_table,
5999 					&mbox_domain);
6000 		/*
6001 		 * ----------------------------------------------------
6002 		 * Send: SLABSIZE, DATAMBOX.DOMAIN, DATAMBOX.TABLE
6003 		 * ----------------------------------------------------
6004 		 */
6005 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
6006 						IDNCFGARG_SIZE_SLAB);
6007 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6008 						IDNCFGARG_DATAMBOX_DOMAIN);
6009 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6010 						IDNCFGARG_DATAMBOX_TABLE);
6011 		cfg_subtype.info.num = 3;
6012 		cfg_subtype.info.phase = phase;
6013 		dp->dcfgphase = phase;
6014 
6015 		ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
6016 		ASSERT(mbox_table != IDN_NIL_SMROFFSET);
6017 
6018 		PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), "
6019 			"DATAMBOX.DOMAIN (0x%x), DATAMBOX.TABLE (0x%x)\n",
6020 			proc, domid, phase, IDN_SLAB_BUFCOUNT, mbox_domain,
6021 			mbox_table);
6022 
6023 		IDNXDC(domid, &mt, cfg_subtype.val, IDN_SLAB_BUFCOUNT,
6024 			mbox_domain, mbox_table);
6025 		break;
6026 
6027 	case 2:
6028 		barpfn = idn.smr.locpfn;
6029 		larpfn = barpfn + (uint_t)btop(MB2B(IDN_SMR_SIZE));
6030 		/*
6031 		 * ----------------------------------------------------
6032 		 * Send: NETID, BARLAR
6033 		 * ----------------------------------------------------
6034 		 */
6035 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
6036 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_BARLAR,
6037 						IDNCFGARG_BARLAR_BAR);
6038 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_BARLAR,
6039 						IDNCFGARG_BARLAR_LAR);
6040 		cfg_subtype.info.num = 3;
6041 		cfg_subtype.info.phase = phase;
6042 		dp->dcfgphase = phase;
6043 
6044 		PR_PROTO("%s:%d:%d: sending NETID (%d), "
6045 			"BARPFN/LARPFN (0x%x/0x%x)\n",
6046 			proc, domid, phase, ldp->dnetid, barpfn, larpfn);
6047 
6048 		IDNXDC(domid, &mt, cfg_subtype.val,
6049 			(uint_t)ldp->dnetid, barpfn, larpfn);
6050 		break;
6051 
6052 	case 3:
6053 		nmcadr = ldp->dhw.dh_nmcadr;
6054 		cpus_u32 = UPPER32_CPUMASK(ldp->dcpuset);
6055 		cpus_l32 = LOWER32_CPUMASK(ldp->dcpuset);
6056 		/*
6057 		 * ----------------------------------------------------
6058 		 * Send: CPUSET, NMCADR
6059 		 * ----------------------------------------------------
6060 		 */
6061 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_CPUSET,
6062 						IDNCFGARG_CPUSET_UPPER);
6063 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
6064 						IDNCFGARG_CPUSET_LOWER);
6065 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_NMCADR, 0);
6066 		cfg_subtype.info.num = 3;
6067 		cfg_subtype.info.phase = phase;
6068 		dp->dcfgphase = phase;
6069 
6070 		PR_PROTO("%s:%d:%d: sending CPUSET (0x%x.%x), NMCADR (%d)\n",
6071 			proc, domid, phase, cpus_u32, cpus_l32, nmcadr);
6072 
6073 		IDNXDC(domid, &mt, cfg_subtype.val,
6074 			cpus_u32, cpus_l32, nmcadr);
6075 		break;
6076 
6077 	case 4:
6078 		/*
6079 		 * ----------------------------------------------------
6080 		 * Send: BOARDSET, MTU, BUFSIZE
6081 		 * ----------------------------------------------------
6082 		 */
6083 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
6084 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
6085 						IDNCFGARG_SIZE_MTU);
6086 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6087 						IDNCFGARG_SIZE_BUF);
6088 		cfg_subtype.info.num = 3;
6089 		cfg_subtype.info.phase = phase;
6090 		dp->dcfgphase = phase;
6091 
6092 		PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
6093 			"BUFSIZE (0x%x)\n", proc, domid, phase,
6094 			ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6095 
6096 		IDNXDC(domid, &mt, cfg_subtype.val,
6097 			ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6098 		break;
6099 
6100 	case 5:
6101 		/*
6102 		 * ----------------------------------------------------
6103 		 * Send: MAXNETS, MBOXPERNET, CKSUM
6104 		 * ----------------------------------------------------
6105 		 */
6106 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATASVR,
6107 						IDNCFGARG_DATASVR_MAXNETS);
6108 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
6109 						IDNCFGARG_DATASVR_MBXPERNET);
6110 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_OPTIONS,
6111 						IDNCFGARG_CHECKSUM);
6112 		cfg_subtype.info.num = 3;
6113 		cfg_subtype.info.phase = phase;
6114 		dp->dcfgphase = phase;
6115 
6116 		PR_PROTO("%s:%d:%d: sending MAXNETS (%d), "
6117 			"MBOXPERNET (%d), CKSUM (%d)\n",
6118 			proc, domid, phase,
6119 			IDN_MAX_NETS, IDN_MBOX_PER_NET,
6120 			IDN_CHECKSUM);
6121 
6122 		IDNXDC(domid, &mt, cfg_subtype.val,
6123 			IDN_MAX_NETS, IDN_MBOX_PER_NET, IDN_CHECKSUM);
6124 		break;
6125 
6126 	case 6:
6127 		/*
6128 		 * ----------------------------------------------------
6129 		 * Send: NWRSIZE (piggyback on MCADRs)
6130 		 * ----------------------------------------------------
6131 		 */
6132 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
6133 						IDNCFGARG_SIZE_NWR);
6134 		mcadr[0] = IDN_NWR_SIZE;
6135 		m = 1;
6136 
6137 		/*FALLTHROUGH*/
6138 
6139 	default:	/* case 7 and above */
6140 		/*
6141 		 * ----------------------------------------------------
6142 		 * Send: MCADR's
6143 		 * ----------------------------------------------------
6144 		 * First need to figure how many we've already sent
6145 		 * based on what phase of CONFIG we're in.
6146 		 * ----------------------------------------------------
6147 		 */
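		/*
		 * Example: entering phase 7, NWRSIZE plus (up to) two
		 * MCADRs went out with phase 6, so p = (0 * 3) + 2 = 2
		 * non-zero MCADR slots are skipped below; entering
		 * phase 8, p = (1 * 3) + 2 = 5, and so on.
		 */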
6148 		if (phase > 6) {
6149 			p = ((phase - 7) * 3) + 2;
6150 			for (b = 0; (b < MAX_BOARDS) && (p > 0); b++)
6151 				if (ldp->dhw.dh_mcadr[b])
6152 					p--;
6153 		} else {
6154 			b = 0;
6155 		}
6156 
6157 		for (; (b < MAX_BOARDS) && (m < 3); b++) {
6158 			if (ldp->dhw.dh_mcadr[b] == 0)
6159 				continue;
6160 			mcadr[m] = ldp->dhw.dh_mcadr[b];
6161 			cfg_subtype.param.p[m] = IDN_CFGPARAM(IDNCFG_MCADR, b);
6162 			m++;
6163 		}
6164 		if (m > 0) {
6165 			if (phase == 6) {
6166 				PR_PROTO("%s:%d:%d: sending NWRSIZE (%d), "
6167 					"MCADRs (0x%x, 0x%x)\n",
6168 					proc, domid, phase,
6169 					mcadr[0], mcadr[1], mcadr[2]);
6170 			} else {
6171 				PR_PROTO("%s:%d:%d: sending MCADRs "
6172 					"(0x%x, 0x%x, 0x%x)\n",
6173 					proc, domid, phase,
6174 					mcadr[0], mcadr[1], mcadr[2]);
6175 			}
6176 			cfg_subtype.info.num = m;
6177 			cfg_subtype.info.phase = phase;
6178 			dp->dcfgphase = phase;
6179 
6180 			IDNXDC(domid, &mt, cfg_subtype.val,
6181 				mcadr[0], mcadr[1], mcadr[2]);
6182 		} else {
6183 			rv = 1;
6184 		}
6185 		break;
6186 	}
6187 
6188 	return (rv);
6189 }
6190 
6191 /*
6192  * RETURNS:
6193  *	1	Unexpected/unnecessary phase.
6194  *	0	Successfully handled.
6195  */
6196 static int
6197 idn_send_slave_config(int domid, int phase)
6198 {
6199 	idn_cfgsubtype_t	cfg_subtype;
6200 	int		rv = 0;
6201 	idn_domain_t	*dp, *ldp;
6202 	smr_offset_t	mbox_domain;
6203 	idn_msgtype_t	mt;
6204 	int		mbox_index;
6205 	uint_t		cpus_u32, cpus_l32;
6206 	procname_t	proc = "idn_send_slave_config";
6207 
6208 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6209 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
6210 
6211 	mt.mt_mtype = IDNP_CFG;
6212 	mt.mt_atype = 0;
6213 	dp = &idn_domain[domid];
6214 	ldp = &idn_domain[idn.localid];
6215 
6216 	ASSERT(dp->dstate == IDNDS_CONFIG);
6217 	ASSERT(ldp->dvote.v.master == 0);
6218 
6219 	switch (phase) {
6220 
6221 	case 1:
6222 		mbox_index = IDN_NIL_DOMID;
6223 		mbox_domain = IDN_NIL_SMROFFSET;
6224 		idn_get_mbox_config(domid, &mbox_index, NULL, &mbox_domain);
6225 		/*
6226 		 * ----------------------------------------------------
6227 		 * Send: DATAMBOX.DOMAIN or DATAMBOX.INDEX,
6228 		 *	 DATASVR.MAXNETS, DATASVR.MBXPERNET
6229 		 * ----------------------------------------------------
6230 		 */
6231 		cfg_subtype.val = 0;
6232 		if (mbox_index == IDN_NIL_DOMID) {
6233 			ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
6234 			cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6235 						    IDNCFGARG_DATAMBOX_DOMAIN);
6236 		} else {
6237 			/*
6238 			 * Should only be sending Index to
6239 			 * the master and not another slave.
6240 			 */
6241 			ASSERT(dp->dvote.v.master);
6242 			ASSERT(mbox_domain == IDN_NIL_SMROFFSET);
6243 			cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6244 						    IDNCFGARG_DATAMBOX_INDEX);
6245 		}
6246 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
6247 						IDNCFGARG_DATASVR_MAXNETS);
6248 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATASVR,
6249 						IDNCFGARG_DATASVR_MBXPERNET);
6250 		cfg_subtype.info.num = 3;
6251 		cfg_subtype.info.phase = phase;
6252 		dp->dcfgphase = phase;
6253 
6254 		PR_PROTO("%s:%d:%d: sending DATAMBOX.%s (0x%x), "
6255 			"MAXNETS (%d), MBXPERNET (%d)\n",
6256 			proc, domid, phase,
6257 			(IDN_CFGPARAM_ARG(cfg_subtype.param.p[0])
6258 			    == IDNCFGARG_DATAMBOX_INDEX)
6259 			    ? "INDEX" : "DOMAIN",
6260 			(mbox_index == IDN_NIL_DOMID)
6261 			    ? mbox_domain : mbox_index,
6262 			    IDN_MAX_NETS, IDN_MBOX_PER_NET);
6263 
6264 		IDNXDC(domid, &mt, cfg_subtype.val,
6265 			((mbox_index == IDN_NIL_DOMID)
6266 				? mbox_domain : mbox_index),
6267 			IDN_MAX_NETS, IDN_MBOX_PER_NET);
6268 		break;
6269 
6270 	case 2:
6271 		cpus_u32 = UPPER32_CPUMASK(ldp->dcpuset);
6272 		cpus_l32 = LOWER32_CPUMASK(ldp->dcpuset);
6273 		/*
6274 		 * ----------------------------------------------------
6275 		 * Send: NETID, CPUSET
6276 		 * ----------------------------------------------------
6277 		 */
6278 		cfg_subtype.val = 0;
6279 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
6280 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
6281 						    IDNCFGARG_CPUSET_UPPER);
6282 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_CPUSET,
6283 						    IDNCFGARG_CPUSET_LOWER);
6284 		cfg_subtype.info.num = 3;
6285 		cfg_subtype.info.phase = phase;
6286 		dp->dcfgphase = phase;
6287 
6288 		PR_PROTO("%s:%d:%d: sending NETID (%d), "
6289 			"CPUSET (0x%x.%x)\n", proc, domid, phase,
6290 			ldp->dnetid, cpus_u32, cpus_l32);
6291 
6292 		IDNXDC(domid, &mt, cfg_subtype.val,
6293 			(uint_t)ldp->dnetid, cpus_u32, cpus_l32);
6294 		break;
6295 
6296 	case 3:
6297 		/*
6298 		 * ----------------------------------------------------
6299 		 * Send: BOARDSET, MTU, BUFSIZE
6300 		 * ----------------------------------------------------
6301 		 */
6302 		cfg_subtype.val = 0;
6303 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
6304 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
6305 							IDNCFGARG_SIZE_MTU);
6306 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6307 							IDNCFGARG_SIZE_BUF);
6308 		cfg_subtype.info.num = 3;
6309 		cfg_subtype.info.phase = phase;
6310 		dp->dcfgphase = phase;
6311 
6312 		PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
6313 			"BUFSIZE (0x%x)\n",
6314 			proc, domid, phase, ldp->dhw.dh_boardset, IDN_MTU,
6315 			IDN_SMR_BUFSIZE);
6316 
6317 		IDNXDC(domid, &mt, cfg_subtype.val,
6318 			ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6319 		break;
6320 
6321 	case 4:
6322 		/*
6323 		 * ----------------------------------------------------
6324 		 * Send: SLABSIZE, OPTIONS.CHECKSUM, NWR_SIZE
6325 		 * ----------------------------------------------------
6326 		 */
6327 		cfg_subtype.val = 0;
6328 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
6329 						    IDNCFGARG_SIZE_SLAB);
6330 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_OPTIONS,
6331 						    IDNCFGARG_CHECKSUM);
6332 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6333 						    IDNCFGARG_SIZE_NWR);
6334 		cfg_subtype.info.num = 3;
6335 		cfg_subtype.info.phase = phase;
6336 		dp->dcfgphase = phase;
6337 
6338 		PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), CKSUM (%d), "
6339 			"NWRSIZE (%d)\n",
6340 			proc, domid, phase, IDN_SLAB_BUFCOUNT,
6341 			IDN_CHECKSUM, IDN_NWR_SIZE);
6342 
6343 		IDNXDC(domid, &mt, cfg_subtype.val,
6344 			IDN_SLAB_BUFCOUNT, IDN_CHECKSUM, IDN_NWR_SIZE);
6345 		break;
6346 
6347 	default:
6348 		rv = 1;
6349 		break;
6350 	}
6351 
6352 	return (rv);
6353 }
6354 
6355 #define	CFG_FATAL	((uint_t)-1)	/* reset link */
6356 #define	CFG_CONTINUE	0x0000		/* looking for more */
6357 #define	CFG_DONE	0x0001		/* got everything expected */
6358 #define	CFG_ERR_MTU	0x0002
6359 #define	CFG_ERR_BUF	0x0004
6360 #define	CFG_ERR_SLAB	0x0008
6361 #define	CFG_ERR_NWR	0x0010
6362 #define	CFG_ERR_NETS	0x0020
6363 #define	CFG_ERR_MBOX	0x0040
6364 #define	CFG_ERR_NMCADR	0x0080
6365 #define	CFG_ERR_MCADR	0x0100
6366 #define	CFG_ERR_CKSUM	0x0200
6367 #define	CFG_ERR_SMR	0x0400
6368 #define	CFG_MAX_ERRORS	16
6369 
6370 #define	CFGERR2IDNKERR(ce) \
6371 	(((ce) & CFG_ERR_MTU)	? IDNKERR_CONFIG_MTU 	: \
6372 	((ce) & CFG_ERR_BUF)	? IDNKERR_CONFIG_BUF 	: \
6373 	((ce) & CFG_ERR_SLAB)	? IDNKERR_CONFIG_SLAB 	: \
6374 	((ce) & CFG_ERR_NWR)	? IDNKERR_CONFIG_NWR 	: \
6375 	((ce) & CFG_ERR_NETS)	? IDNKERR_CONFIG_NETS 	: \
6376 	((ce) & CFG_ERR_MBOX)	? IDNKERR_CONFIG_MBOX 	: \
6377 	((ce) & CFG_ERR_NMCADR)	? IDNKERR_CONFIG_NMCADR	: \
6378 	((ce) & CFG_ERR_MCADR)	? IDNKERR_CONFIG_MCADR	: \
6379 	((ce) & CFG_ERR_CKSUM)	? IDNKERR_CONFIG_CKSUM	: \
6380 	((ce) & CFG_ERR_SMR)	? IDNKERR_CONFIG_SMR	: 0)
6381 
6382 #define	CFGERR2FINARG(ce) \
6383 	(((ce) & CFG_ERR_MTU)	? IDNFIN_ARG_CFGERR_MTU    : \
6384 	((ce) & CFG_ERR_BUF)	? IDNFIN_ARG_CFGERR_BUF    : \
6385 	((ce) & CFG_ERR_SLAB)	? IDNFIN_ARG_CFGERR_SLAB   : \
6386 	((ce) & CFG_ERR_NWR)	? IDNFIN_ARG_CFGERR_NWR    : \
6387 	((ce) & CFG_ERR_NETS)	? IDNFIN_ARG_CFGERR_NETS   : \
6388 	((ce) & CFG_ERR_MBOX)	? IDNFIN_ARG_CFGERR_MBOX   : \
6389 	((ce) & CFG_ERR_NMCADR)	? IDNFIN_ARG_CFGERR_NMCADR : \
6390 	((ce) & CFG_ERR_MCADR)	? IDNFIN_ARG_CFGERR_MCADR  : \
6391 	((ce) & CFG_ERR_CKSUM)	? IDNFIN_ARG_CFGERR_CKSUM  : \
6392 	((ce) & CFG_ERR_SMR)	? IDNFIN_ARG_CFGERR_SMR	   : IDNFIN_ARG_NONE)
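/*
 * Note that these macros map only a single CFG_ERR_* bit (the first
 * match in the list above).  When more than one error bit is set,
 * idn_recv_config reports IDNKERR_CONFIG_MULTIPLE instead of using
 * CFGERR2IDNKERR().
 */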
6393 
6394 /*
6395  * Called when some CFG messages arrive.  We use dncfgitems to count the
6396  * total number of items received so far since we'll receive multiple CFG
6397  * messages during the CONFIG phase.  Note that dncfgitems is initialized
6398  * in idn_send_config.
6399  */
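/*
 * Each CFG message carries a subtype word (up to three IDN_CFGPARAM()
 * descriptors plus an item count and the phase) and up to three
 * matching values in the cross-domain args.  Every message is ACKed
 * with its phase number so the sender can advance to the next phase
 * (see idn_send_config).
 */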
6400 static void
6401 idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
6402 {
6403 	uint_t		msg = mtp->mt_mtype;
6404 	uint_t		rv, rv_expected, rv_actual;
6405 	int		pnum;
6406 	int		phase;
6407 	register int	p;
6408 	register int	c;
6409 	idn_mainmbox_t	*mmp;
6410 	register uint_t	subtype, subtype_arg;
6411 	idn_domain_t	*dp;
6412 	int		index;
6413 	idn_domain_t	*ldp = &idn_domain[idn.localid];
6414 	idn_mboxtbl_t	*mbtp;
6415 	idn_cfgsubtype_t	cfg_subtype;
6416 	idn_xdcargs_t	cfg_arg;
6417 	idn_msgtype_t	mt;
6418 	idnsb_error_t	idnerr;
6419 	procname_t	proc = "idn_recv_config";
6420 
6421 	ASSERT(domid != idn.localid);
6422 
6423 	GET_XARGS(xargs, &cfg_subtype.val, &cfg_arg[0], &cfg_arg[1],
6424 			&cfg_arg[2]);
6425 	cfg_arg[3] = 0;
6426 
6427 	dp = &idn_domain[domid];
6428 
6429 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6430 
6431 	if (dp->dstate != IDNDS_CONFIG) {
6432 		/*
6433 		 * Not ready to receive config info.
6434 		 * Drop whatever he sent us.  Let the
6435 		 * timer continue and timeout if needed.
6436 		 */
6437 		PR_PROTO("%s:%d: WARNING state(%s) != CONFIG\n",
6438 			proc, domid, idnds_str[dp->dstate]);
6439 		return;
6440 	}
6441 
6442 	if ((msg & IDNP_ACKNACK_MASK) || dp->dcfgsnddone) {
6443 		IDN_MSGTIMER_STOP(domid, IDNP_CFG, 0);
6444 	}
6445 
6446 	if (msg & IDNP_ACKNACK_MASK) {
6447 		/*
6448 		 * ack/cfg
6449 		 */
6450 		phase = GET_XARGS_CFG_PHASE(xargs);
6451 
6452 		PR_PROTO("%s:%d: received ACK for CFG phase %d\n",
6453 			proc, domid, phase);
6454 		if (phase != (int)dp->dcfgphase) {
6455 			/*
6456 			 * Phase is not what we were
6457 			 * expecting.  Something got lost
6458 			 * in the shuffle.  Restart the
6459 			 * timer and let it timeout if necessary
6460 			 * and reestablish the connection.
6461 			 */
6462 			IDN_MSGTIMER_START(domid, IDNP_CFG, dp->dcfgphase,
6463 					idn_msg_waittime[IDNP_CFG], NULL);
6464 		} else {
6465 			idn_send_config(domid, phase + 1);
6466 
6467 			if (dp->dcfgsnddone && dp->dcfgrcvdone) {
6468 				IDN_DUNLOCK(domid);
6469 				IDN_SYNC_LOCK();
6470 				IDN_DLOCK_EXCL(domid);
6471 				if (dp->dstate == IDNDS_CONFIG) {
6472 					dp->dxp = &xphase_con;
6473 					IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
6474 					bzero(xargs, sizeof (xargs));
6475 
6476 					idn_xphase_transition(domid, NULL,
6477 								xargs);
6478 				}
6479 				IDN_SYNC_UNLOCK();
6480 			}
6481 		}
6482 		return;
6483 	}
6484 
6485 	pnum = (int)cfg_subtype.info.num;
6486 	phase = (int)cfg_subtype.info.phase;
6487 
6488 	for (p = 0; p < pnum; p++) {
6489 		int	board;
6490 #ifdef DEBUG
6491 		uint_t	val;
6492 		char	*str;
6493 
6494 		val = 0;
6495 		str = NULL;
6496 #define	RCVCFG(s, v)	{ str = (s); val = (v); }
6497 #else
6498 #define	RCVCFG(s, v)	{}
6499 #endif /* DEBUG */
6500 
6501 		subtype	    = IDN_CFGPARAM_TYPE(cfg_subtype.param.p[p]);
6502 		subtype_arg = IDN_CFGPARAM_ARG(cfg_subtype.param.p[p]);
6503 
6504 		switch (subtype) {
6505 
6506 		case IDNCFG_BARLAR:
6507 			IDN_GLOCK_EXCL();
6508 			switch (subtype_arg) {
6509 
6510 			case IDNCFGARG_BARLAR_BAR:
6511 				if (idn.smr.rempfn == PFN_INVALID) {
6512 					idn.smr.rempfn = (pfn_t)cfg_arg[p];
6513 					dp->dncfgitems++;
6514 					RCVCFG("BARLAR_BAR", cfg_arg[p]);
6515 				}
6516 				break;
6517 
6518 			case IDNCFGARG_BARLAR_LAR:
6519 				if (idn.smr.rempfnlim == PFN_INVALID) {
6520 					idn.smr.rempfnlim = (pfn_t)cfg_arg[p];
6521 					dp->dncfgitems++;
6522 					RCVCFG("BARLAR_LAR", cfg_arg[p]);
6523 				}
6524 				break;
6525 
6526 			default:
6527 				cmn_err(CE_WARN,
6528 					"IDN 217: unknown CFGARG type (%d) "
6529 					"from domain %d",
6530 					subtype_arg, domid);
6531 				break;
6532 			}
6533 			IDN_GUNLOCK();
6534 			break;
6535 
6536 		case IDNCFG_MCADR:
6537 			board = subtype_arg;
6538 			if ((board >= 0) && (board < MAX_BOARDS) &&
6539 			    (dp->dhw.dh_mcadr[board] == 0)) {
6540 				dp->dhw.dh_mcadr[board] = cfg_arg[p];
6541 				dp->dncfgitems++;
6542 				RCVCFG("MCADR", cfg_arg[p]);
6543 			}
6544 			break;
6545 
6546 		case IDNCFG_NMCADR:
6547 			if (dp->dhw.dh_nmcadr == 0) {
6548 				dp->dhw.dh_nmcadr = cfg_arg[p];
6549 				dp->dncfgitems++;
6550 				RCVCFG("NMCADR", cfg_arg[p]);
6551 			}
6552 			break;
6553 
6554 		case IDNCFG_CPUSET:
6555 			switch (subtype_arg) {
6556 
6557 			case IDNCFGARG_CPUSET_UPPER:
6558 			{
6559 				cpuset_t	tmpset;
6560 
6561 				MAKE64_CPUMASK(tmpset, cfg_arg[p], 0);
6562 				CPUSET_OR(dp->dcpuset, tmpset);
6563 				dp->dncfgitems++;
6564 				RCVCFG("CPUSET_UPPER", cfg_arg[p]);
6565 				break;
6566 			}
6567 			case IDNCFGARG_CPUSET_LOWER:
6568 			{
6569 				cpuset_t	tmpset;
6570 
6571 				MAKE64_CPUMASK(tmpset, 0, cfg_arg[p]);
6572 				CPUSET_OR(dp->dcpuset, tmpset);
6573 				dp->dncfgitems++;
6574 				RCVCFG("CPUSET_LOWER", cfg_arg[p]);
6575 				break;
6576 			}
6577 			default:
6578 				ASSERT(0);
6579 				break;
6580 			}
6581 			break;
6582 
6583 		case IDNCFG_NETID:
6584 			if (dp->dnetid == (ushort_t)-1) {
6585 				dp->dnetid = (ushort_t)cfg_arg[p];
6586 				dp->dncfgitems++;
6587 				RCVCFG("NETID", cfg_arg[p]);
6588 			}
6589 			break;
6590 
6591 		case IDNCFG_BOARDSET:
6592 			if ((dp->dhw.dh_boardset & cfg_arg[p])
6593 						== dp->dhw.dh_boardset) {
6594 				/*
6595 				 * Boardset better include what we
6596 				 * already know about.
6597 				 */
6598 				dp->dhw.dh_boardset = cfg_arg[p];
6599 				dp->dncfgitems++;
6600 				RCVCFG("BOARDSET", cfg_arg[p]);
6601 			}
6602 			break;
6603 
6604 		case IDNCFG_SIZE:
6605 			switch (subtype_arg) {
6606 
6607 			case IDNCFGARG_SIZE_MTU:
6608 				if (dp->dmtu == 0) {
6609 					dp->dmtu = cfg_arg[p];
6610 					dp->dncfgitems++;
6611 					RCVCFG("MTU", cfg_arg[p]);
6612 				}
6613 				break;
6614 
6615 			case IDNCFGARG_SIZE_BUF:
6616 				if (dp->dbufsize == 0) {
6617 					dp->dbufsize = cfg_arg[p];
6618 					dp->dncfgitems++;
6619 					RCVCFG("BUFSIZE", cfg_arg[p]);
6620 				}
6621 				break;
6622 
6623 			case IDNCFGARG_SIZE_SLAB:
6624 				if (dp->dslabsize == 0) {
6625 					dp->dslabsize = (short)cfg_arg[p];
6626 					dp->dncfgitems++;
6627 					RCVCFG("SLABSIZE", cfg_arg[p]);
6628 				}
6629 				break;
6630 
6631 			case IDNCFGARG_SIZE_NWR:
6632 				if (dp->dnwrsize == 0) {
6633 					dp->dnwrsize = (short)cfg_arg[p];
6634 					dp->dncfgitems++;
6635 					RCVCFG("NWRSIZE", cfg_arg[p]);
6636 				}
6637 				break;
6638 
6639 			default:
6640 				ASSERT(0);
6641 				break;
6642 			}
6643 			break;
6644 
6645 		case IDNCFG_DATAMBOX:
6646 			switch (subtype_arg) {
6647 
6648 			case IDNCFGARG_DATAMBOX_TABLE:
6649 				if (ldp->dmbox.m_tbl ||
6650 				    !dp->dvote.v.master ||
6651 				    !VALID_NWROFFSET(cfg_arg[p], 4)) {
6652 					/*
6653 					 * Only a master should be
6654 					 * sending us a datambox table.
6655 					 */
6656 					break;
6657 				}
6658 				IDN_DLOCK_EXCL(idn.localid);
6659 				ldp->dmbox.m_tbl = (idn_mboxtbl_t *)
6660 						    IDN_OFFSET2ADDR(cfg_arg[p]);
6661 				IDN_DUNLOCK(idn.localid);
6662 				dp->dncfgitems++;
6663 				RCVCFG("DATAMBOX.TABLE", cfg_arg[p]);
6664 				break;
6665 
6666 			case IDNCFGARG_DATAMBOX_DOMAIN:
6667 				if (dp->dmbox.m_send->mm_smr_mboxp ||
6668 				    !VALID_NWROFFSET(cfg_arg[p], 4))
6669 					break;
6670 				mbtp = (idn_mboxtbl_t *)
6671 						IDN_OFFSET2ADDR(cfg_arg[p]);
6672 				mmp = dp->dmbox.m_send;
6673 				for (c = 0; c < IDN_MAX_NETS; c++) {
6674 
6675 					mutex_enter(&mmp[c].mm_mutex);
6676 					mmp[c].mm_smr_mboxp = mbtp;
6677 					mutex_exit(&mmp[c].mm_mutex);
6678 
6679 					IDN_MBOXTBL_PTR_INC(mbtp);
6680 				}
6681 				if (c <= 0)
6682 					break;
6683 				dp->dncfgitems++;
6684 				RCVCFG("DATAMBOX.DOMAIN", cfg_arg[p]);
6685 				break;
6686 
6687 			case IDNCFGARG_DATAMBOX_INDEX:
6688 				if (!ldp->dvote.v.master ||
6689 				    dp->dmbox.m_send->mm_smr_mboxp) {
6690 					/*
6691 					 * If I'm not the master then
6692 					 * I can't handle processing a
6693 					 * mailbox index.
6694 					 * OR, if I already have the send
6695 					 * mailbox, I'm done with this
6696 					 * config item.
6697 					 */
6698 					break;
6699 				}
6700 				ASSERT(dp->dmbox.m_tbl);
6701 				index = (int)cfg_arg[p];
6702 				/*
6703 				 * The given index is the local domain's
6704 				 * index into the remote domain's mailbox
6705 				 * table that contains the mailbox that
6706 				 * remote domain wants the local domain to
6707 				 * use as the send mailbox for messages
6708 				 * destined for the remote domain.
6709 				 * I.e. from the remote domain's
6710 				 *	perspective, this is his receive
6711 				 *	mailbox.
6712 				 */
6713 				mbtp = IDN_MBOXTBL_PTR(dp->dmbox.m_tbl, index);
6714 				mmp = dp->dmbox.m_send;
6715 				for (c = 0; c < IDN_MAX_NETS; c++) {
6716 
6717 					mutex_enter(&mmp[c].mm_mutex);
6718 					mmp[c].mm_smr_mboxp = mbtp;
6719 					mutex_exit(&mmp[c].mm_mutex);
6720 
6721 					IDN_MBOXTBL_PTR_INC(mbtp);
6722 				}
6723 				if (c <= 0)
6724 					break;
6725 				dp->dncfgitems++;
6726 				RCVCFG("DATAMBOX.INDEX", cfg_arg[p]);
6727 				break;
6728 
6729 			default:
6730 				ASSERT(0);
6731 				break;
6732 			}
6733 			break;
6734 
6735 		case IDNCFG_DATASVR:
6736 			switch (subtype_arg) {
6737 
6738 			case IDNCFGARG_DATASVR_MAXNETS:
6739 				if (dp->dmaxnets)
6740 					break;
6741 				dp->dmaxnets = (uint_t)(cfg_arg[p] & 0x3f);
6742 				dp->dncfgitems++;
6743 				RCVCFG("DATASVR.MAXNETS", cfg_arg[p]);
6744 				break;
6745 
6746 			case IDNCFGARG_DATASVR_MBXPERNET:
6747 				if (dp->dmboxpernet)
6748 					break;
6749 				dp->dmboxpernet = (uint_t)(cfg_arg[p] & 0x1ff);
6750 				dp->dncfgitems++;
6751 				RCVCFG("DATASVR.MBXPERNET", cfg_arg[p]);
6752 				break;
6753 
6754 			default:
6755 				ASSERT(0);
6756 				break;
6757 			}
6758 			break;
6759 
6760 		case IDNCFG_OPTIONS:
6761 			switch (subtype_arg) {
6762 
6763 			case IDNCFGARG_CHECKSUM:
6764 				if (dp->dcksum)
6765 					break;
6766 				if ((cfg_arg[p] & 0xff) == 0)
6767 					dp->dcksum = 1;		/* off */
6768 				else
6769 					dp->dcksum = 2;		/* on */
6770 				dp->dncfgitems++;
6771 				RCVCFG("OPTIONS.CHECKSUM", cfg_arg[p]);
6772 				break;
6773 
6774 			default:
6775 				ASSERT(0);
6776 				break;
6777 			}
			break;
6778 
6779 		default:
6780 			break;
6781 		}
6782 #ifdef DEBUG
6783 		PR_PROTO("%s:%d: received %s (0x%x)\n",
6784 			proc, domid, str ? str : "<empty>", val);
6785 #endif /* DEBUG */
6786 	}
6787 
6788 	mt.mt_mtype = IDNP_ACK;
6789 	mt.mt_atype = IDNP_CFG;
6790 	mt.mt_cookie = mtp->mt_cookie;
6791 	CLR_XARGS(cfg_arg);
6792 	SET_XARGS_CFG_PHASE(cfg_arg, phase);
6793 	idn_send_acknack(domid, &mt, cfg_arg);
6794 
6795 	rv_expected = rv_actual = 0;
6796 
6797 	if (dp->dvote.v.master == 0) {
6798 		/*
6799 		 * Remote domain is a slave, check if we've received
6800 		 * all that we were expecting, and if so transition to
6801 		 * the next state.
6802 		 */
6803 		rv = idn_check_slave_config(domid, &rv_expected, &rv_actual);
6804 	} else {
6805 		/*
6806 		 * Remote domain is a master, check if this slave has
6807 		 * received all that it was expecting, and if so
6808 		 * transition to the next state.
6809 		 */
6810 		rv = idn_check_master_config(domid, &rv_expected, &rv_actual);
6811 	}
6812 
6813 	switch (rv) {
6814 	case CFG_DONE:
6815 		/*
6816 		 * All config info received that was expected, wrap up.
6817 		 */
6818 		if (!idn_recv_config_done(domid) && dp->dvote.v.master) {
6819 			IDN_DLOCK_EXCL(idn.localid);
6820 			ldp->dvote.v.connected = 1;
6821 			IDN_DUNLOCK(idn.localid);
6822 		}
6823 		break;
6824 
6825 	case CFG_CONTINUE:
6826 		/*
6827 		 * If we're not done sending our own config, then
6828 		 * there's no need to set a timer since one will
6829 		 * automatically be set when we send a config
6830 		 * message waiting for an acknowledgement.
6831 		 */
6832 		if (dp->dcfgsnddone) {
6833 			/*
6834 			 * We haven't yet received all the config
6835 			 * information we were expecting.  Need to
6836 			 * restart the CFG timer since we've sent everything.
6837 			 */
6838 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
6839 					idn_msg_waittime[IDNP_CFG], NULL);
6840 		}
6841 		break;
6842 
6843 	case CFG_FATAL:
6844 		/*
6845 		 * Fatal error occurred during config exchange.
6846 		 * We need to shut down the connection in this
6847 		 * case, so initiate a (non-relink) FIN.
6849 		 */
6850 		IDN_DUNLOCK(domid);
6851 		IDN_SYNC_LOCK();
6852 		IDN_DLOCK_EXCL(domid);
6853 		/*
6854 		 * If the state has changed from CONFIG
6855 		 * then somebody else has taken over
6856 		 * control of this domain so we can just
6857 		 * bail out.
6858 		 */
6859 		if (dp->dstate == IDNDS_CONFIG) {
6860 			INIT_IDNKERR(&idnerr);
6861 			SET_IDNKERR_ERRNO(&idnerr, EPROTO);
6862 			SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_FATAL);
6863 			SET_IDNKERR_PARAM0(&idnerr, domid);
6864 			idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
6865 			/*
6866 			 * Keep this guy around so we can try again.
6867 			 */
6868 			DOMAINSET_ADD(idn.domset.ds_relink, domid);
6869 			IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
6870 					idn.domset.ds_relink);
6871 			idn_disconnect(domid, IDNFIN_NORMAL,
6872 					IDNFIN_ARG_CFGERR_FATAL,
6873 					IDNFIN_SYNC_NO);
6874 		}
6875 		IDN_SYNC_UNLOCK();
6876 		break;
6877 
6878 	default:	/* parameter conflict */
6879 		IDN_DUNLOCK(domid);
6880 		IDN_SYNC_LOCK();
6881 		IDN_DLOCK_EXCL(domid);
6882 		if (dp->dstate != IDNDS_CONFIG) {
6883 			/*
6884 			 * Hmmm...changed in the short period
6885 			 * we had dropped the lock, oh well.
6886 			 */
6887 			IDN_SYNC_UNLOCK();
6888 			break;
6889 		}
6890 		c = 0;
6891 		for (p = 0; p < CFG_MAX_ERRORS; p++)
6892 			if (rv & (1 << p))
6893 				c++;
6894 		INIT_IDNKERR(&idnerr);
6895 		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
6896 		SET_IDNKERR_PARAM0(&idnerr, domid);
6897 		if (c > 1) {
6898 			SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_MULTIPLE);
6899 			SET_IDNKERR_PARAM1(&idnerr, c);
6900 		} else {
6901 			SET_IDNKERR_IDNERR(&idnerr, CFGERR2IDNKERR(rv));
6902 			SET_IDNKERR_PARAM1(&idnerr, rv_expected);
6903 			SET_IDNKERR_PARAM2(&idnerr, rv_actual);
6904 		}
6905 		/*
6906 		 * Any parameter conflicts are grounds for dismissal.
6907 		 */
6908 		if (idn.domset.ds_connected == 0) {
6909 			domainset_t	domset;
6910 			/*
6911 			 * We have no other connections yet.
6912 			 * We must blow out of here completely
6913 			 * unless we have relinkers left from
6914 			 * a RECONFIG.
6915 			 */
6916 			IDN_GLOCK_EXCL();
6917 			domset = ~idn.domset.ds_relink;
6918 			if (idn.domset.ds_relink == 0) {
6919 				IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
6920 			}
6921 			domset &= ~idn.domset.ds_hitlist;
6922 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
6923 			IDN_GUNLOCK();
6924 			IDN_DUNLOCK(domid);
6925 
6926 			DOMAINSET_DEL(domset, idn.localid);
6927 
6928 			idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);
6929 
6930 			PR_HITLIST("%s:%d: unlink_domainset(%x) due to "
6931 				"CFG error (relink=%x, hitlist=%x)\n",
6932 				proc, domid, domset, idn.domset.ds_relink,
6933 				idn.domset.ds_hitlist);
6934 
6935 			idn_unlink_domainset(domset, IDNFIN_NORMAL,
6936 						CFGERR2FINARG(rv),
6937 						IDNFIN_OPT_UNLINK,
6938 						BOARDSET_ALL);
6939 			IDN_SYNC_UNLOCK();
6940 			IDN_DLOCK_EXCL(domid);
6941 		} else {
6942 			PR_HITLIST("%s:%d: idn_disconnect(%d) due to CFG "
6943 				"error (conn=%x, relink=%x, hitlist=%x)\n",
6944 				proc, domid, domid, idn.domset.ds_connected,
6945 				idn.domset.ds_relink, idn.domset.ds_hitlist);
6946 			/*
6947 			 * If we have other connections then
6948 			 * we're only going to blow away this
6949 			 * single connection.
6950 			 */
6951 			idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
6952 
6953 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
6954 			idn_disconnect(domid, IDNFIN_NORMAL,
6955 					CFGERR2FINARG(rv), IDNFIN_SYNC_NO);
6956 			IDN_SYNC_UNLOCK();
6957 		}
6958 		break;
6959 	}
6960 }
6961 
6962 /*
6963  * Called by master or slave which expects exactly the following
6964  * with respect to config info received from a SLAVE:
6965  * 	IDNCFG_CPUSET
6966  *	IDNCFG_NETID
6967  *	IDNCFG_BOARDSET
6968  *	IDNCFG_SIZE (MTU, BUF, SLAB, NWR)
6969  *	IDNCFG_DATAMBOX (DOMAIN or INDEX if caller is master)
6970  *	IDNCFG_DATASVR (MAXNETS, MBXPERNET)
6971  *	IDNCFG_OPTIONS (CHECKSUM)
6972  */
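/*
 * On a parameter mismatch the corresponding CFG_ERR_* bit is set in
 * the return value and (*exp, *act) hold the expected and actual
 * values of the offending parameter; with multiple mismatches they
 * reflect the last one checked.
 */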
6973 static uint_t
6974 idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
6975 {
6976 	uint_t		rv = 0;
6977 	idn_domain_t	*ldp, *dp;
6978 	procname_t	proc = "idn_check_slave_config";
6979 
6980 	dp = &idn_domain[domid];
6981 	ldp = &idn_domain[idn.localid];
6982 
6983 	ASSERT(domid != idn.localid);
6984 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6985 	ASSERT(dp->dstate == IDNDS_CONFIG);
6986 
6987 	PR_PROTO("%s:%d: number received %d, number expected %d\n",
6988 		proc, domid, (int)dp->dncfgitems, IDN_SLAVE_NCFGITEMS);
6989 
6990 	if ((int)dp->dncfgitems < IDN_SLAVE_NCFGITEMS)
6991 		return (CFG_CONTINUE);
6992 
6993 	if ((dp->dnetid == (ushort_t)-1) ||
6994 	    CPUSET_ISNULL(dp->dcpuset) ||
6995 	    (dp->dhw.dh_boardset == 0) ||
6996 	    (dp->dmbox.m_send->mm_smr_mboxp == NULL) ||
6997 	    (dp->dmaxnets == 0) ||
6998 	    (dp->dmboxpernet == 0) ||
6999 	    (dp->dcksum == 0) ||
7000 	    (dp->dmtu == 0) ||
7001 	    (dp->dbufsize == 0) ||
7002 	    (dp->dslabsize == 0) ||
7003 	    (dp->dnwrsize == 0)) {
7004 		/*
7005 		 * We received our IDN_SLAVE_NCFGITEMS config items,
7006 		 * but not everything we were expecting!  Gotta nack and
7007 		 * close connection.
7008 		 */
7009 		cmn_err(CE_WARN,
7010 			"IDN: 218: missing some required config items from "
7011 			"domain %d", domid);
7012 
7013 		rv = CFG_FATAL;
7014 		goto done;
7015 	}
7016 
7017 	if (!valid_mtu(dp->dmtu)) {
7018 		cmn_err(CE_WARN,
7019 			"IDN: 219: remote domain %d MTU (%d) invalid "
7020 			"(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
7021 
7022 		*exp = (uint_t)ldp->dmtu;
7023 		*act = (uint_t)dp->dmtu;
7024 		rv |= CFG_ERR_MTU;
7025 	}
7026 	if (!valid_bufsize(dp->dbufsize)) {
7027 		cmn_err(CE_WARN,
7028 			"IDN: 220: remote domain %d BUFSIZE (%d) invalid "
7029 			"(local.bufsize = %d)", dp->domid, dp->dbufsize,
7030 			ldp->dbufsize);
7031 
7032 		*exp = (uint_t)ldp->dbufsize;
7033 		*act = (uint_t)dp->dbufsize;
7034 		rv |= CFG_ERR_BUF;
7035 	}
7036 	if (!valid_slabsize((int)dp->dslabsize)) {
7037 		cmn_err(CE_WARN,
7038 			"IDN: 221: remote domain %d SLABSIZE (%d) invalid "
7039 			"(local.slabsize = %d)",
7040 			dp->domid, dp->dslabsize, ldp->dslabsize);
7041 
7042 		*exp = (uint_t)ldp->dslabsize;
7043 		*act = (uint_t)dp->dslabsize;
7044 		rv |= CFG_ERR_SLAB;
7045 	}
7046 	if (!valid_nwrsize((int)dp->dnwrsize)) {
7047 		cmn_err(CE_WARN,
7048 			"IDN: 223: remote domain %d NWRSIZE (%d) invalid "
7049 			"(local.nwrsize = %d)",
7050 			dp->domid, dp->dnwrsize, ldp->dnwrsize);
7051 
7052 		*exp = (uint_t)ldp->dnwrsize;
7053 		*act = (uint_t)dp->dnwrsize;
7054 		rv |= CFG_ERR_NWR;
7055 	}
7056 	if ((int)dp->dmaxnets != IDN_MAX_NETS) {
7057 		cmn_err(CE_WARN,
7058 			"IDN: 224: remote domain %d MAX_NETS (%d) invalid "
7059 			"(local.maxnets = %d)",
7060 			dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
7061 
7062 		*exp = (uint_t)IDN_MAX_NETS;
7063 		*act = (uint_t)dp->dmaxnets;
7064 		rv |= CFG_ERR_NETS;
7065 	}
7066 	if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
7067 		cmn_err(CE_WARN,
7068 			"IDN: 225: remote domain %d MBOX_PER_NET (%d) "
7069 			"invalid (local.mboxpernet = %d)",
7070 			dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
7071 
7072 		*exp = (uint_t)IDN_MBOX_PER_NET;
7073 		*act = (uint_t)dp->dmboxpernet;
7074 		rv |= CFG_ERR_MBOX;
7075 	}
7076 	if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
7077 		cmn_err(CE_WARN,
7078 			"IDN: 226: remote domain %d CHECKSUM flag (%d) "
7079 			"mismatches local domain's (%d)",
7080 			dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
7081 
7082 		*exp = (uint_t)IDN_CHECKSUM;
7083 		*act = (uint_t)(dp->dcksum - 1);
7084 		rv |= CFG_ERR_CKSUM;
7085 	}
7086 
7087 done:
7088 
7089 	return (rv ? rv : CFG_DONE);
7090 }
7091 
7092 /*
7093  * Called by slave ONLY which expects exactly the following
7094  * config info from the MASTER:
7095  *	IDNCFG_BARLAR
7096  *	IDNCFG_MCADR
7097  *	IDNCFG_NMCADR
7098  * 	IDNCFG_CPUSET
7099  *	IDNCFG_NETID
7100  *	IDNCFG_BOARDSET
7101  *	IDNCFG_SIZE (MTU, BUF, SLAB, NWR)
7102  *	IDNCFG_DATAMBOX (TABLE, DOMAIN)
7103  *	IDNCFG_DATASVR (MAXNETS, MBXPERNET)
7104  *	IDNCFG_OPTIONS (CHECKSUM)
7105  */
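/*
 * Unlike the slave case, the number of expected items is not fixed:
 * the master sends one MCADR per memory board, so the total expected
 * count is IDN_MASTER_NCFGITEMS + (dh_nmcadr - 1) before the
 * cross-checks below can be applied.
 */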
7106 static uint_t
7107 idn_check_master_config(int domid, uint_t *exp, uint_t *act)
7108 {
7109 	uint_t		rv = 0;
7110 	int		nmcadr;
7111 	int		total_expitems;
7112 	int		p, m, err;
7113 	idn_domain_t	*dp;
7114 	idn_domain_t	*ldp = &idn_domain[idn.localid];
7115 	procname_t	proc = "idn_check_master_config";
7116 
7117 	dp = &idn_domain[domid];
7118 
7119 	ASSERT(IDN_GET_MASTERID() != idn.localid);
7120 	ASSERT(domid != idn.localid);
7121 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7122 	ASSERT(dp->dstate == IDNDS_CONFIG);
7123 
7124 	PR_PROTO("%s:%d: number received %d, minimum number expected %d\n",
7125 		proc, domid, (int)dp->dncfgitems, IDN_MASTER_NCFGITEMS);
7126 
7127 	if ((int)dp->dncfgitems < IDN_MASTER_NCFGITEMS)
7128 		return (CFG_CONTINUE);
7129 
7130 	/*
7131 	 * We have at least IDN_MASTER_NCFGITEMS items which
7132 	 * means we have at least one MCADR.  Need to make sure
7133 	 * we have all that we're expecting, NMCADR.
7134 	 */
7135 	total_expitems = IDN_MASTER_NCFGITEMS + dp->dhw.dh_nmcadr - 1;
7136 	if ((dp->dhw.dh_nmcadr == 0) ||
7137 	    ((int)dp->dncfgitems < total_expitems)) {
7138 		/*
7139 		 * We have not yet received all the MCADRs
7140 		 * we're expecting.
7141 		 */
7142 		PR_PROTO("%s:%d: haven't received all MCADRs yet.\n",
7143 			proc, domid);
7144 		return (CFG_CONTINUE);
7145 	}
7146 
7147 	nmcadr = 0;
7148 	for (p = 0; p < MAX_BOARDS; p++)
7149 		if (dp->dhw.dh_mcadr[p] != 0)
7150 			nmcadr++;
7151 
7152 	IDN_GLOCK_SHARED();
7153 	if ((idn.smr.rempfn == PFN_INVALID) ||
7154 	    (idn.smr.rempfnlim == PFN_INVALID) ||
7155 	    (dp->dnetid == (ushort_t)-1) ||
7156 	    CPUSET_ISNULL(dp->dcpuset) ||
7157 	    (dp->dhw.dh_boardset == 0) ||
7158 	    (nmcadr != dp->dhw.dh_nmcadr) ||
7159 	    (dp->dmbox.m_send->mm_smr_mboxp == NULL) ||
7160 	    (ldp->dmbox.m_tbl == NULL) ||
7161 	    (dp->dmaxnets == 0) ||
7162 	    (dp->dmboxpernet == 0) ||
7163 	    (dp->dcksum == 0) ||
7164 	    (dp->dmtu == 0) ||
7165 	    (dp->dbufsize == 0) ||
7166 	    (dp->dnwrsize == 0)) {
7167 
7168 		IDN_GUNLOCK();
7169 		/*
7170 		 * We received all of our config items, but not
7171 		 * everything we were expecting!  Gotta reset and
7172 		 * close connection.
7173 		 */
7174 		cmn_err(CE_WARN,
7175 			"IDN: 227: missing some required config items from "
7176 			"domain %d", domid);
7177 
7178 		rv = CFG_FATAL;
7179 		goto done;
7180 	}
7181 	if ((idn.smr.rempfnlim - idn.smr.rempfn) > btop(MB2B(IDN_SMR_SIZE))) {
7182 		/*
7183 		 * The master's SMR region is larger than
7184 		 * mine!  This means that this domain may
7185 		 * receive I/O buffers which are out of the
7186 		 * range of this local domain's SMR virtual
7187 		 * address space.  The master SMR has to be
7188 		 * no larger than the local SMR in order to
7189 		 * guarantee enough local virtual addresses
7190 		 * to see all of the SMR space.
7191 		 * XXX - Possibly add negotiating SMR size.
7192 		 *	 Try to create a new virtual mapping.
7193 		 *	 Could let domains negotiate SMR size.
7194 		 *	 Winning size would have to be smallest
7195 		 *	 in DC.  If so, how to handle incoming
7196 		 *	 domains with even smaller SMRs?
7197 		 *	 - Could either disallow connection
7198 		 *	 - Could reconfigure to use smaller SMR.
7199 		 */
7200 		cmn_err(CE_WARN,
7201 			"IDN: 228: master's SMR (%ld) larger than "
7202 			"local's SMR (%ld)",
7203 			idn.smr.rempfnlim - idn.smr.rempfn,
7204 			btop(MB2B(IDN_SMR_SIZE)));
7205 
7206 		*exp = (uint_t)IDN_SMR_SIZE;
7207 		*act = (uint_t)B2MB(ptob(idn.smr.rempfnlim - idn.smr.rempfn));
7208 		rv |= CFG_ERR_SMR;
7209 	}
7210 	IDN_GUNLOCK();
7211 
7212 	if (!valid_mtu(dp->dmtu)) {
7213 		cmn_err(CE_WARN,
7214 			"IDN: 219: remote domain %d MTU (%d) invalid "
7215 			"(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
7216 
7217 		*exp = (uint_t)ldp->dmtu;
7218 		*act = (uint_t)dp->dmtu;
7219 		rv |= CFG_ERR_MTU;
7220 	}
7221 	if (!valid_bufsize(dp->dbufsize)) {
7222 		cmn_err(CE_WARN,
7223 			"IDN: 220: remote domain %d BUFSIZE (%d) invalid "
7224 			"(local.bufsize = %d)", dp->domid, dp->dbufsize,
7225 			ldp->dbufsize);
7226 
7227 		*exp = (uint_t)ldp->dbufsize;
7228 		*act = (uint_t)dp->dbufsize;
7229 		rv |= CFG_ERR_BUF;
7230 	}
7231 	if (!valid_nwrsize((int)dp->dnwrsize)) {
7232 		cmn_err(CE_WARN,
7233 			"IDN: 223: remote domain %d NWRSIZE (%d) invalid "
7234 			"(local.nwrsize = %d)",
7235 			dp->domid, dp->dnwrsize, ldp->dnwrsize);
7236 
7237 		*exp = (uint_t)ldp->dnwrsize;
7238 		*act = (uint_t)dp->dnwrsize;
7239 		rv |= CFG_ERR_NWR;
7240 	}
7241 	if ((int)dp->dmaxnets != IDN_MAX_NETS) {
7242 		cmn_err(CE_WARN,
7243 			"IDN: 224: remote domain %d MAX_NETS (%d) invalid "
7244 			"(local.maxnets = %d)",
7245 			dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
7246 
7247 		*exp = (uint_t)IDN_MAX_NETS;
7248 		*act = (uint_t)dp->dmaxnets;
7249 		rv |= CFG_ERR_NETS;
7250 	}
7251 	if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
7252 		cmn_err(CE_WARN,
7253 			"IDN: 225: remote domain %d MBOX_PER_NET (%d) "
7254 			"invalid (local.mboxpernet = %d)",
7255 			dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
7256 
7257 		*exp = (uint_t)IDN_MBOX_PER_NET;
7258 		*act = (uint_t)dp->dmboxpernet;
7259 		rv |= CFG_ERR_MBOX;
7260 	}
7261 	if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
7262 		cmn_err(CE_WARN,
7263 			"IDN: 226: remote domain %d CHECKSUM flag (%d) "
7264 			"mismatches local domain's (%d)",
7265 			dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
7266 
7267 		*exp = (uint_t)IDN_CHECKSUM;
7268 		*act = (uint_t)(dp->dcksum - 1);
7269 		rv |= CFG_ERR_CKSUM;
7270 	}
7271 	nmcadr = 0;
7272 	err = 0;
7273 	for (m = 0; m < MAX_BOARDS; m++) {
7274 		if (!BOARD_IN_SET(dp->dhw.dh_boardset, m) &&
7275 				dp->dhw.dh_mcadr[m]) {
7276 			cmn_err(CE_WARN,
7277 				"IDN: 229: remote domain %d boardset (0x%x) "
7278 				"conflicts with MCADR(board %d) [0x%x]",
7279 				dp->domid, (uint_t)dp->dhw.dh_boardset, m,
7280 				dp->dhw.dh_mcadr[m]);
7281 			err++;
7282 		}
7283 		if (dp->dhw.dh_mcadr[m])
7284 			nmcadr++;
7285 	}
7286 	if (err) {
7287 		*exp = 0;
7288 		*act = err;
7289 		rv |= CFG_ERR_MCADR;
7290 	} else if (nmcadr != dp->dhw.dh_nmcadr) {
7291 		cmn_err(CE_WARN,
7292 			"IDN: 230: remote domain %d reported number of "
7293 			"MCADRs (%d) mismatches received (%d)",
7294 			dp->domid, dp->dhw.dh_nmcadr, nmcadr);
7295 		*exp = (uint_t)dp->dhw.dh_nmcadr;
7296 		*act = (uint_t)nmcadr;
7297 		rv |= CFG_ERR_NMCADR;
7298 	}
7299 
7300 done:
7301 
7302 	return (rv ? rv : CFG_DONE);
7303 }
7304 
7305 static int
7306 idn_recv_config_done(int domid)
7307 {
7308 	boardset_t		b_conflicts;
7309 	cpuset_t		p_conflicts;
7310 	register int		p, i;
7311 	register idn_domain_t	*dp;
7312 	idnsb_error_t		idnerr;
7313 	procname_t		proc = "idn_recv_config_done";
7314 
7315 	ASSERT(domid != IDN_NIL_DOMID);
7316 	dp = &idn_domain[domid];
7317 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7318 
7319 	/*
7320 	 * Well, we received all that we were expecting
7321 	 * so stop any CFG timers we had going.
7322 	 */
7323 	IDN_MSGTIMER_STOP(domid, IDNP_CFG, 0);
7324 
7325 	dp->dncpus = 0;
7326 	for (p = 0; p < NCPU; p++)
7327 		if (CPU_IN_SET(dp->dcpuset, p))
7328 			dp->dncpus++;
7329 	dp->dhw.dh_nboards = 0;
7330 	for (p = 0; p < MAX_BOARDS; p++)
7331 		if (BOARD_IN_SET(dp->dhw.dh_boardset, p))
7332 			dp->dhw.dh_nboards++;
7333 
7334 	IDN_GLOCK_EXCL();
7335 	/*
7336 	 * Verify dcpuset and dhw.dh_boardset don't
7337 	 * conflict with any existing DC member.
7338 	 */
7339 	b_conflicts = idn.dc_boardset & dp->dhw.dh_boardset;
7340 	CPUSET_ZERO(p_conflicts);
7341 	CPUSET_OR(p_conflicts, idn.dc_cpuset);
7342 	CPUSET_AND(p_conflicts, dp->dcpuset);
7343 
7344 	if (b_conflicts || !CPUSET_ISNULL(p_conflicts)) {
7345 		if (b_conflicts) {
7346 			cmn_err(CE_WARN,
7347 				"IDN: 231: domain %d boardset "
7348 				"(0x%x) conflicts with existing "
7349 				"IDN boardset (0x%x)",
7350 				domid, dp->dhw.dh_boardset,
7351 				b_conflicts);
7352 		}
7353 		if (!CPUSET_ISNULL(p_conflicts)) {
7354 			cmn_err(CE_WARN,
7355 				"IDN: 232: domain %d cpuset "
7356 				"(0x%x.%0x) conflicts with existing "
7357 				"IDN cpuset (0x%x.%0x)", domid,
7358 				UPPER32_CPUMASK(dp->dcpuset),
7359 				LOWER32_CPUMASK(dp->dcpuset),
7360 				UPPER32_CPUMASK(p_conflicts),
7361 				LOWER32_CPUMASK(p_conflicts));
7362 		}
7363 		IDN_GUNLOCK();
7364 		/*
7365 		 * Need to disconnect and not retry with this guy.
7366 		 */
7367 		IDN_DUNLOCK(domid);
7368 		IDN_SYNC_LOCK();
7369 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
7370 		IDN_DLOCK_EXCL(domid);
7371 
7372 		INIT_IDNKERR(&idnerr);
7373 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
7374 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_FATAL);
7375 		SET_IDNKERR_PARAM0(&idnerr, domid);
7376 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
7377 
7378 		idn_disconnect(domid, IDNFIN_FORCE_HARD,
7379 				IDNFIN_ARG_CFGERR_FATAL, IDNFIN_SYNC_NO);
7380 		IDN_SYNC_UNLOCK();
7381 
7382 		return (-1);
7383 	}
7384 
7385 	idn_mainmbox_reset(domid, dp->dmbox.m_send);
7386 	idn_mainmbox_reset(domid, dp->dmbox.m_recv);
7387 
7388 #ifdef IDNBUG_CPUPERBOARD
7389 	/*
7390 	 * We only allow connections to domains whose (mem) boards
7391 	 * all have at least one cpu.  This is necessary so that
7392 	 * we can program the CICs of that respective board.  This
7393 	 * is primarily only a requirement if the remote domain
7394 	 * is the master _and_ has the SMR in that particular board.
7395 	 * To simplify the checking we simply restrict connections to
7396 	 * domains that have at least one cpu on all boards that
7397 	 * contain memory.
7398 	 */
7399 	if (!idn_cpu_per_board((void *)NULL, dp->dcpuset, &dp->dhw)) {
7400 		cmn_err(CE_WARN,
7401 			"IDN: 233: domain %d missing CPU per "
7402 			"memory boardset (0x%x), CPU boardset (0x%x)",
7403 			domid, dp->dhw.dh_boardset,
7404 			cpuset2boardset(dp->dcpuset));
7405 
7406 		IDN_GUNLOCK();
7407 		/*
7408 		 * Need to disconnect and not retry with this guy.
7409 		 */
7410 		IDN_DUNLOCK(domid);
7411 		IDN_SYNC_LOCK();
7412 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
7413 		IDN_DLOCK_EXCL(domid);
7414 
7415 		INIT_IDNKERR(&idnerr);
7416 		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
7417 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CPU_CONFIG);
7418 		SET_IDNKERR_PARAM0(&idnerr, domid);
7419 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
7420 
7421 		idn_disconnect(domid, IDNFIN_FORCE_HARD,
7422 				IDNFIN_ARG_CPUCFG, IDNFIN_SYNC_NO);
7423 		IDN_SYNC_UNLOCK();
7424 
7425 		return (-1);
7426 	}
7427 #endif /* IDNBUG_CPUPERBOARD */
7428 
7429 	CPUSET_OR(idn.dc_cpuset, dp->dcpuset);
7430 	idn.dc_boardset |= dp->dhw.dh_boardset;
7431 
7432 	IDN_GUNLOCK();
7433 
7434 	/*
7435 	 * Set up the portmap for this domain.
7436 	 */
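	/*
	 * BUMP_INDEX() (see idn.h) presumably advances i to the next
	 * cpu present in dcpuset, wrapping around; the net effect is
	 * that dcpumap[] maps any index 0..NCPU-1 onto a cpu that
	 * actually exists in the remote domain.
	 */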
7437 	i = -1;
7438 	for (p = 0; p < NCPU; p++) {
7439 		BUMP_INDEX(dp->dcpuset, i);
7440 		dp->dcpumap[p] = (uchar_t)i;
7441 	}
7442 
7443 	/*
7444 	 * Got everything we need from the remote
7445 	 * domain, now we can program hardware as needed.
7446 	 */
7447 	if (idn_program_hardware(domid) != 0) {
7448 		domainset_t	domset;
7449 		/*
7450 		 * Yikes!  Failed to program hardware.
7451 		 * Gotta bail.
7452 		 */
7453 		cmn_err(CE_WARN,
7454 			"IDN: 234: failed to program hardware for domain %d "
7455 			"(boardset = 0x%x)",
7456 			domid, dp->dhw.dh_boardset);
7457 
7458 		IDN_DUNLOCK(domid);
7459 		/*
7460 		 * If we're having problems programming our
7461 		 * hardware we better unlink completely from
7462 		 * the IDN before things get really bad.
7463 		 */
7464 		IDN_SYNC_LOCK();
7465 		IDN_GLOCK_EXCL();
7466 		IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
7467 		domset = DOMAINSET_ALL;
7468 		DOMAINSET_DEL(domset, idn.localid);
7469 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
7470 		IDN_GUNLOCK();
7471 
7472 		INIT_IDNKERR(&idnerr);
7473 		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
7474 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_HW_ERROR);
7475 		SET_IDNKERR_PARAM0(&idnerr, domid);
7476 		idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);
7477 
7478 		idn_unlink_domainset(domset, IDNFIN_NORMAL, IDNFIN_ARG_HWERR,
7479 					IDNFIN_OPT_UNLINK, BOARDSET_ALL);
7480 
7481 		IDN_SYNC_UNLOCK();
7482 		IDN_DLOCK_EXCL(domid);
7483 
7484 		return (-1);
7485 	}
7486 
7487 	/*
7488 	 * Now that hardware has been programmed we can
7489 	 * remap the SMR into our local space, if necessary.
7490 	 */
7491 	IDN_GLOCK_EXCL();
7492 	if (domid == IDN_GET_MASTERID()) {
7493 		/*
7494 		 * No need to worry about disabling the data
7495 		 * server since at this stage there is only
7496 		 * one and he doesn't go active until his
7497 		 * mailbox (dmbox.m_recv->mm_smr_mboxp) is set up.
7498 		 */
7499 		smr_remap(&kas, idn.smr.vaddr, idn.smr.rempfn, IDN_SMR_SIZE);
7500 	}
7501 	IDN_GUNLOCK();
7502 
7503 	/*
7504 	 * There is no need to ACK the CFG messages since remote
7505 	 * domain would not progress to the next state (CON_SENT)
7506 	 * unless he has received everything.
7507 	 */
7508 
7509 	dp->dcfgrcvdone = 1;
7510 	PR_PROTO("%s:%d: RECV config DONE\n", proc, domid);
7511 
7512 	if (dp->dcfgsnddone) {
7513 		idn_xdcargs_t	xargs;
7514 		/*
7515 		 * Well, we've received all that we were expecting,
7516 		 * but we don't know if the remote domain has
7517 		 * received all that it was expecting from us,
7518 		 * although we know we transferred everything
7519 		 * so let's get the show on the road.
7520 		 */
7521 		IDN_DUNLOCK(domid);
7522 		IDN_SYNC_LOCK();
7523 		IDN_DLOCK_EXCL(domid);
7524 		/*
7525 		 * If the state has changed from CONFIG
7526 		 * then somebody else has taken over
7527 		 * control of this domain so we can just
7528 		 * bail out.
7529 		 */
7530 		if (dp->dstate == IDNDS_CONFIG) {
7531 			dp->dxp = &xphase_con;
7532 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
7533 			bzero(xargs, sizeof (xargs));
7534 
7535 			idn_xphase_transition(domid, NULL, xargs);
7536 		}
7537 		IDN_SYNC_UNLOCK();
7538 	}
7539 
7540 	return (0);
7541 }
7542 
7543 static int
7544 idn_verify_config_mbox(int domid)
7545 {
7546 	idn_domain_t	*ldp, *dp;
7547 	idn_mainmbox_t	*mmp;
7548 	idn_mboxtbl_t	*mtp;
7549 	int		c, rv = 0;
7550 	uint_t		activeptr, readyptr;
7551 	ushort_t	mbox_csum;
7552 
7553 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7554 
7555 	dp = &idn_domain[domid];
7556 	ldp = &idn_domain[idn.localid];
7557 
7558 	/*
7559 	 * The master will have assigned us the dmbox.m_tbl
7560 	 * from which we assign our receive mailboxes.
7561 	 * The first (0) entry contains the cookie used
7562 	 * for verification.
7563 	 */
7564 	IDN_DLOCK_SHARED(idn.localid);
7565 	/*
7566 	 * Now that we have an assigned mboxtbl from the
7567 	 * master, we can determine which receive mailbox
7568 	 * we indirectly assigned to him at the time we
7569 	 * sent him his MBOX_INDEX.  Prep it, however note
7570 	 * that the master will not have been able to
7571 	 * validate it because of the chicken 'n egg
7572 	 * problem between a master and slave.  Thus we
7573 	 * need to reset the cookie after the prep.
7574 	 */
7575 	mmp = dp->dmbox.m_recv;
7576 	mtp = IDN_MBOXTBL_PTR(ldp->dmbox.m_tbl, domid);
7577 	for (c = 0; c < IDN_MAX_NETS; c++) {
7578 		mutex_enter(&mmp[c].mm_mutex);
7579 		ASSERT(!mmp[c].mm_smr_mboxp);
7580 
7581 		mmp[c].mm_smr_mboxp = mtp;
7582 		mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
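		/*
		 * VALID_MBOXHDR verifies the header's cookie (against
		 * IDN_MAKE_MBOXHDR_COOKIE(0, 0, c)) and its stored
		 * checksum (against the mbox_csum just computed).
		 */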
7583 		if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
7584 			cmn_err(CE_WARN,
7585 				"IDN: 235: [recv] mailbox (domain %d, "
7586 				"channel %d) SMR CORRUPTED - RELINK",
7587 				domid, c);
7588 			cmn_err(CE_CONT,
7589 				"IDN: 235: [recv] expected (cookie 0x%x, "
7590 				"cksum 0x%x) actual (cookie 0x%x, "
7591 				"cksum 0x%x)\n",
7592 				IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
7593 				(int)mtp->mt_header.mh_cksum,
7594 				IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
7595 				(int)mbox_csum);
7596 			mutex_exit(&mmp[c].mm_mutex);
7597 			rv = -1;
7598 			break;
7599 		}
7600 		activeptr = mtp->mt_header.mh_svr_active_ptr;
7601 		readyptr = mtp->mt_header.mh_svr_ready_ptr;
7602 		/*
7603 		 * Verify pointers are valid.
7604 		 */
7605 		if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
7606 			!readyptr || !VALID_NWROFFSET(readyptr, 2)) {
7607 			cmn_err(CE_WARN,
7608 				"IDN: 235: [recv] mailbox (domain %d, "
7609 				"channel %d) SMR CORRUPTED - RELINK",
7610 				domid, c);
7611 			cmn_err(CE_CONT,
7612 				"IDN: 235: [recv] activeptr (0x%x), "
7613 				"readyptr (0x%x)\n",
7614 				activeptr, readyptr);
7615 			mutex_exit(&mmp[c].mm_mutex);
7616 			rv = -1;
7617 			break;
7618 		}
7619 		mmp[c].mm_smr_activep =
7620 			(ushort_t *)IDN_OFFSET2ADDR(activeptr);
7621 		mmp[c].mm_smr_readyp =
7622 			(ushort_t *)IDN_OFFSET2ADDR(readyptr);
7623 		mutex_exit(&mmp[c].mm_mutex);
7624 		IDN_MBOXTBL_PTR_INC(mtp);
7625 	}
7626 
7627 	IDN_DUNLOCK(idn.localid);
7628 
7629 	if (rv)
7630 		return (rv);
7631 
7632 	/*
7633 	 * Now we need to translate SMR offsets for send mailboxes
7634 	 * to actual virtual addresses.
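	 * The same cookie/checksum and active/ready pointer validation
	 * done for the receive side is applied before the SMR offsets
	 * are converted with IDN_OFFSET2ADDR.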
7635 	 */
7636 	mmp = dp->dmbox.m_send;
7637 	for (c = 0; c < IDN_MAX_NETS; mmp++, c++) {
7638 		mutex_enter(&mmp->mm_mutex);
7639 		if ((mtp = mmp->mm_smr_mboxp) == NULL) {
7640 			mutex_exit(&mmp->mm_mutex);
7641 			rv = -1;
7642 			break;
7643 		}
7644 
7645 		mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
7646 
7647 		if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
7648 			cmn_err(CE_WARN,
7649 				"IDN: 235: [send] mailbox (domain %d, "
7650 				"channel %d) SMR CORRUPTED - RELINK",
7651 				domid, c);
7652 			cmn_err(CE_CONT,
7653 				"IDN: 235: [send] expected (cookie 0x%x, "
7654 				"cksum 0x%x) actual (cookie 0x%x, "
7655 				"cksum 0x%x)\n",
7656 				IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
7657 				(int)mtp->mt_header.mh_cksum,
7658 				IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
7659 				(int)mbox_csum);
7660 			mutex_exit(&mmp->mm_mutex);
7661 			rv = -1;
7662 			break;
7663 		}
7664 		activeptr = mtp->mt_header.mh_svr_active_ptr;
7665 		readyptr = mtp->mt_header.mh_svr_ready_ptr;
7666 		/*
7667 		 * Paranoid check.
7668 		 */
7669 		if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
7670 			!readyptr || !VALID_NWROFFSET(readyptr, 2)) {
7671 			cmn_err(CE_WARN,
7672 				"IDN: 235: [send] mailbox (domain %d, "
7673 				"channel %d) SMR CORRUPTED - RELINK",
7674 				domid, c);
7675 			cmn_err(CE_CONT,
7676 				"IDN: 235: [send] activeptr (0x%x), "
7677 				"readyptr (0x%x)\n",
7678 				activeptr, readyptr);
7679 			mutex_exit(&mmp->mm_mutex);
7680 			rv = -1;
7681 			break;
7682 		}
7683 		mmp->mm_smr_activep = (ushort_t *)IDN_OFFSET2ADDR(activeptr);
7684 		mmp->mm_smr_readyp = (ushort_t *)IDN_OFFSET2ADDR(readyptr);
7685 		idn_reset_mboxtbl(mtp);
7686 		mutex_exit(&mmp->mm_mutex);
7687 		IDN_MBOXTBL_PTR_INC(mtp);
7688 	}
7689 
7690 	return (rv);
7691 }
7692 
7693 /*
7694  * The BUFSIZEs between domains have to be equal so that slave buffers
7695  * and the master's slabpool are consistent.
7696  * The MTUs between domains have to be equal so they can transfer
7697  * packets consistently without possible data truncation.
7698  *
7699  * ZZZ - Perhaps these could be negotiated?
7700  */
7701 static int
7702 valid_mtu(uint_t mtu)
7703 {
7704 	return ((mtu == idn_domain[idn.localid].dmtu) && mtu);
7705 }
7706 
7707 static int
7708 valid_bufsize(uint_t bufsize)
7709 {
7710 	return ((bufsize == idn_domain[idn.localid].dbufsize) && bufsize);
7711 }
7712 
7713 static int
7714 valid_slabsize(int slabsize)
7715 {
7716 	return ((slabsize == idn_domain[idn.localid].dslabsize) && slabsize);
7717 }
7718 
7719 static int
7720 valid_nwrsize(int nwrsize)
7721 {
7722 	return ((nwrsize == idn_domain[idn.localid].dnwrsize) && nwrsize);
7723 }
7724 
7725 static int
7726 idn_program_hardware(int domid)
7727 {
7728 	int		rv, is_master;
7729 	idn_domain_t	*dp;
7730 	uint_t		*mcadrp;
7731 	pfn_t		rem_pfn, rem_pfnlimit;
7732 	procname_t	proc = "idn_program_hardware";
7733 
7734 	PR_PROTO("%s:%d: program hw in domain %d w.r.t remote domain %d\n",
7735 		proc, domid, idn.localid, domid);
7736 
7737 	dp = &idn_domain[domid];
7738 
7739 	ASSERT(domid != idn.localid);
7740 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7741 	ASSERT(dp->dstate == IDNDS_CONFIG);
7742 
7743 	IDN_GLOCK_EXCL();
7744 
7745 	if (DOMAIN_IN_SET(idn.domset.ds_hwlinked, domid)) {
7746 		IDN_GUNLOCK();
7747 		return (0);
7748 	}
7749 
7750 	DOMAINSET_ADD(idn.domset.ds_flush, domid);
7751 	CHECKPOINT_OPENED(IDNSB_CHKPT_CACHE, dp->dhw.dh_boardset, 1);
7752 
7753 	if (domid != IDN_GET_MASTERID()) {
7754 		/*
7755 		 * If the remote domain is a slave, then
7756 		 * all we have to program is the CIC sm_mask.
7757 		 */
7758 		is_master = 0;
7759 		if ((idn.localid == IDN_GET_MASTERID()) &&
7760 					lock_try(&idn.first_hwlink)) {
7761 			/*
7762 			 * This is our first HW link and I'm the
7763 			 * master, which means we need to program
7764 			 * our local bar/lar.
7765 			 */
7766 			ASSERT(idn.first_hwmasterid == (short)IDN_NIL_DOMID);
7767 			idn.first_hwmasterid = (short)idn.localid;
7768 			rem_pfn = idn.smr.locpfn;
7769 			rem_pfnlimit = idn.smr.locpfn +
7770 					btop(MB2B(IDN_SMR_SIZE));
7771 		} else {
7772 			/*
7773 			 * Otherwise, just a slave linking to
7774 			 * another slave.  No bar/lar updating
7775 			 * necessary.
7776 			 */
7777 			rem_pfn = rem_pfnlimit = PFN_INVALID;
7778 		}
7779 		mcadrp = NULL;
7780 	} else {
7781 		/*
7782 		 * If the remote domain is a master, then
7783 		 * we need to program the CIC sm_mask/sm_bar/sm_lar,
7784 		 * and PC's.
7785 		 */
7786 		is_master = 1;
7787 		rem_pfn = idn.smr.rempfn;
7788 		rem_pfnlimit = idn.smr.rempfnlim;
7789 		mcadrp = dp->dhw.dh_mcadr;
7790 		ASSERT(idn.first_hwmasterid == (short)IDN_NIL_DOMID);
7791 		idn.first_hwmasterid = (short)domid;
7792 	}
7793 
7794 	PR_PROTO("%s:%d: ADD bset (0x%x)\n",
7795 		proc, domid, dp->dhw.dh_boardset);
7796 
7797 	rv = idnxf_shmem_add(is_master, dp->dhw.dh_boardset,
7798 				rem_pfn, rem_pfnlimit, mcadrp);
7799 
7800 	if (rv == 0) {
7801 		DOMAINSET_ADD(idn.domset.ds_hwlinked, domid);
7802 	} else {
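		/*
		 * Add failed.  Back out the bookkeeping done above:
		 * release first_hwlink if we grabbed it, forget the
		 * first hw masterid, and remove whatever boards
		 * idnxf_shmem_add may have partially set up.
		 */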
7803 		if (rem_pfn == idn.smr.locpfn)
7804 			lock_clear(&idn.first_hwlink);
7805 
7806 		if (idn.first_hwmasterid == (short)domid)
7807 			idn.first_hwmasterid = (short)IDN_NIL_DOMID;
7808 
7809 		(void) idnxf_shmem_sub(is_master, dp->dhw.dh_boardset);
7810 	}
7811 
7812 	IDN_GUNLOCK();
7813 
7814 	return (rv);
7815 }
7816 
7817 static int
7818 idn_deprogram_hardware(int domid)
7819 {
7820 	int		rv, is_master;
7821 	idn_domain_t	*dp;
7822 	procname_t	proc = "idn_deprogram_hardware";
7823 
7824 
7825 	dp = &idn_domain[domid];
7826 
7827 	ASSERT(domid != idn.localid);
7828 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7829 
7830 	/*
7831 	 * Need to take into consideration what boards remote
7832 	 * domain was connected to.  If we don't have a connection to
7833 	 * them ourselves, then we better remove them now, otherwise
7834 	 * they'll never be removed (unless we link to them at some point).
7835 	 */
7836 #if 0
7837 	DEBUG_USECDELAY(500000);
7838 #endif /* 0 */
7839 
7840 	IDN_GLOCK_EXCL();
7841 
7842 	if (!DOMAIN_IN_SET(idn.domset.ds_hwlinked, domid)) {
7843 		IDN_GUNLOCK();
7844 		return (0);
7845 	}
7846 
7847 	PR_PROTO("%s:%d: DEprogram hw in domain %d w.r.t remote domain %d\n",
7848 		proc, domid, idn.localid, domid);
7849 
7850 	/*
7851 	 * It's possible to come through this flow for domains that
7852 	 * have not been programmed, i.e. not in idn.hwlinked_domset,
7853 	 * so don't bother asserting that they might be in there.
7854 	 * This can occur if we lose a domain during the config/syn
7855 	 * sequence.  If this occurs we won't know whether the remote
7856 	 * domain has programmed its hardware or not.  If it has then
7857 	 * it will have to go through the DMAP sequence and thus we
7858 	 * have to go through it also.  So, if we reach at least the
7859 	 * CONFIG state, we need to go through the DMAP handshake.
7860 	 */
7861 
7862 	PR_PROTO("%s:%d: SUB bset (0x%x)\n",
7863 		proc, domid, dp->dhw.dh_boardset);
7864 
7865 	if (idn.first_hwmasterid == (short)domid) {
7866 		is_master = 1;
7867 		idn.first_hwmasterid = (short)IDN_NIL_DOMID;
7868 	} else {
7869 		is_master = 0;
7870 	}
7871 	rv = idnxf_shmem_sub(is_master, dp->dhw.dh_boardset);
7872 
7873 	if (rv == 0)
7874 		DOMAINSET_DEL(idn.domset.ds_hwlinked, domid);
7875 
7876 	IDN_GUNLOCK();
7877 
7878 	return (rv);
7879 }
7880 
7881 /*
7882  * Remember can't send slabs back to master at this point.
7883  * Entered with write-drwlock held.
7884  * Returns with drwlock dropped.
7885  */
7886 static void
7887 idn_deconfig(int domid)
7888 {
7889 	idn_domain_t	*dp, *ldp;
7890 	smr_slab_t	*sp;
7891 	int		c, masterid;
7892 	procname_t	proc = "idn_deconfig";
7893 
7894 	ASSERT(IDN_SYNC_IS_LOCKED());
7895 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7896 	ASSERT(domid != idn.localid);
7897 
7898 	ldp = &idn_domain[idn.localid];
7899 	dp = &idn_domain[domid];
7900 
7901 	ASSERT(dp->dstate == IDNDS_DMAP);
7902 
7903 	PR_PROTO("%s:%d: (dio=%d, dioerr=%d, dnslabs=%d)\n",
7904 		proc, domid, dp->dio, dp->dioerr, dp->dnslabs);
7905 
7906 	IDN_GLOCK_EXCL();
7907 	masterid = IDN_GET_MASTERID();
7908 
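	/*
	 * Remove this domain's boards and CPUs from the IDN-wide
	 * sets that were OR'd in when the domain was configured.
	 */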
7909 	idn.dc_boardset &= ~dp->dhw.dh_boardset;
7910 	for (c = 0; c < NCPU; c++) {
7911 		if (CPU_IN_SET(dp->dcpuset, c)) {
7912 			CPUSET_DEL(idn.dc_cpuset, c);
7913 		}
7914 	}
7915 
7916 	IDN_GUNLOCK();
7917 
7918 	(void) smr_buf_free_all(domid);
7919 
7920 	if (idn.localid == masterid) {
7921 		/*
7922 		 * Since I'm the master there may
7923 		 * have been slabs in this domain's
7924 		 * idn_domain[] entry.
7925 		 */
7926 		DSLAB_LOCK_EXCL(domid);
7927 		if ((sp = dp->dslab) != NULL) {
7928 			PR_PROTO("%s:%d: freeing up %d dead slabs\n",
7929 				proc, domid, dp->dnslabs);
7930 			smr_slab_free(domid, sp);
7931 			dp->dslab = NULL;
7932 			dp->dnslabs = 0;
7933 			dp->dslab_state = DSLAB_STATE_UNKNOWN;
7934 		}
7935 		DSLAB_UNLOCK(domid);
7936 	} else if (domid == masterid) {
7937 		/*
7938 		 * We're shutting down the master!
7939 		 * We need to blow away our local slab
7940 		 * data structures.
7941 		 * Since I'm not the master, there should
7942 		 * be no slab structures in the given
7943 		 * domain's idn_domain[] entry.  They should
7944 		 * only exist in the local domain's entry.
7945 		 */
7946 		DSLAB_LOCK_EXCL(idn.localid);
7947 		ASSERT(dp->dslab == NULL);
7948 #ifdef DEBUG
7949 		{
7950 			int	nbusy = 0;
7951 			uint_t	dommask = 0;
7952 			for (sp = ldp->dslab; sp; sp = sp->sl_next) {
7953 			    smr_slabbuf_t *bp;
7954 
7955 			    if (!smr_slab_busy(sp))
7956 				continue;
7957 			    nbusy++;
7958 			    for (bp = sp->sl_inuse; bp; bp = bp->sb_next)
7959 				if (bp->sb_domid != IDN_NIL_DOMID)
7960 				    DOMAINSET_ADD(dommask, bp->sb_domid);
7961 			}
7962 			if (nbusy)
7963 				PR_PROTO("%s:%d: found %d busy slabs "
7964 					"(dommask = 0x%x)\n",
7965 					proc, domid, nbusy, dommask);
7966 		}
7967 #endif /* DEBUG */
7968 		if ((sp = ldp->dslab) != NULL) {
7969 			PR_PROTO("%s:%d: freeing up %d local slab "
7970 				"structs\n", proc, domid, ldp->dnslabs);
7971 			smr_slab_garbage_collection(sp);
7972 			ldp->dslab = NULL;
7973 			ldp->dnslabs = 0;
7974 			ldp->dslab_state = DSLAB_STATE_UNKNOWN;
7975 		}
7976 		DSLAB_UNLOCK(idn.localid);
7977 	}
7978 	if (dp->dio) {
7979 		PR_PROTO("%s:%d: reset dio (%d) to 0\n",
7980 			proc, domid, dp->dio);
7981 		dp->dio = 0;
7982 	}
7983 	dp->dioerr = 0;
7984 
7985 	PR_PROTO("%s:%d: reset diocheck (%x) to 0\n",
7986 			proc, domid, dp->diocheck);
7987 	lock_clear(&dp->diocheck);
7988 
7989 	CHECKPOINT_CLOSED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 2);
7990 
7991 	/*
7992 	 * Should have already flushed our memory before
7993 	 * reaching this stage.  The issue is that by the
7994 	 * time we reach here the remote domains may have
7995 	 * already reprogrammed their hardware and so flushing
7996 	 * out caches now could result in an arbstop/hang
7997 	 * if we have data that needs to go back to one
7998 	 * of the remote domains that has already reprogrammed
7999 	 * its hardware.
8000 	 */
8001 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_flush, domid));
8002 
8003 	(void) idn_deprogram_hardware(domid);
8004 	/*
8005 	 * XXX - what to do if we
8006 	 *	 fail to program hardware
8007 	 *	 probably should panic since
8008 	 *	 demise of system may be near?
8009 	 *	 Sufficient to just shutdown network?
8010 	 */
8011 
8012 	IDN_DSTATE_TRANSITION(dp, IDNDS_CLOSED);
8013 
8014 	idn_close_domain(domid);
8015 }
8016 
8017 /*
8018  * If we're sending a Reset we better make sure we don't have any
8019  * references or traffic headed in the direction of this guy, since
8020  * when he receives the reset, he'll start shutting down which means
8021  * we effectively have to shut down _before_ sending the reset.
8022  * DO NOT HOLD ANY DOMAIN RWLOCKS ON ENTRY.  Could result in deadlock
8023  * due to channel server looping back through STREAMs and attempting
8024  * to acquire domain lock, i.e. channel server will never "stop".
8025  */
8026 static void
8027 idn_shutdown_datapath(domainset_t domset, int force)
8028 {
8029 	int		do_allchan;
8030 	idn_domain_t	*dp;
8031 	register int	d;
8032 	procname_t	proc = "idn_shutdown_datapath";
8033 
8034 
8035 	PR_CHAN("%s: domset = 0x%x\n", proc, (uint_t)domset);
8036 
8037 	do_allchan = (domset == DOMAINSET_ALL) ? 1 : 0;
8038 
8039 	DOMAINSET_DEL(domset, idn.localid);
8040 
8041 	if (do_allchan) {
8042 		/*
8043 		 * Need to stop all outgoing and
8044 		 * incoming SMR references.
8045 		 */
8046 		idn_deactivate_channel(CHANSET_ALL, IDNCHAN_OFFLINE);
8047 	}
8048 
8049 	/*
8050 	 * If force is set then we don't want to reference
8051 	 * the SMR at all, so deactivate the domains from
8052 	 * channels first.  This will cause the mainmbox-flush
8053 	 * routines to just clean up without referencing the
8054 	 * SMR space.
8055 	 */
8056 	if (force)
8057 		idn_mainmbox_deactivate(domset);
8058 
8059 	/*
8060 	 * Flush out mailboxes (clear smr reference).
8061 	 */
8062 	for (d = 0; d < MAX_DOMAINS; d++) {
8063 		if (!DOMAIN_IN_SET(domset, d))
8064 			continue;
8065 
8066 		dp = &idn_domain[d];
8067 		if ((dp->dmbox.m_send == NULL) && (dp->dmbox.m_recv == NULL))
8068 			continue;
8069 
8070 		IDN_MBOX_LOCK(d);
8071 		if (dp->dmbox.m_send)
8072 			(void) idn_mainmbox_flush(d, dp->dmbox.m_send);
8073 		if (dp->dmbox.m_recv)
8074 			(void) idn_mainmbox_flush(d, dp->dmbox.m_recv);
8075 		IDN_MBOX_UNLOCK(d);
8076 	}
8077 	/*
8078 	 * Deactivate all domain references also.
8079 	 * Only necessary if it wasn't already done above.
8080 	 */
8081 	if (!force)
8082 		idn_mainmbox_deactivate(domset);
8083 }
8084 
8085 void
8086 idn_send_cmd(int domid, idn_cmd_t cmdtype,
8087 		uint_t arg1, uint_t arg2, uint_t arg3)
8088 {
8089 	idn_msgtype_t	mt;
8090 	procname_t	proc = "idn_send_cmd";
8091 
8092 	mt.mt_mtype = IDNP_CMD;
8093 	mt.mt_atype = 0;
8094 	mt.mt_cookie = 0;
8095 
8096 	ASSERT(IDN_DLOCK_IS_HELD(domid));
8097 
8098 	PR_PROTO("%s:%d: sending command %s\n",
8099 		proc, domid,
8100 		VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown");
8101 
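	/*
	 * Arm a CMD message timer keyed by cmdtype before issuing the
	 * cross-domain call; idn_terminate_cmd() later uses these
	 * timers to locate any commands still outstanding.
	 */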
8102 	IDN_MSGTIMER_START(domid, IDNP_CMD, (ushort_t)cmdtype,
8103 				idn_msg_waittime[IDNP_CMD], &mt.mt_cookie);
8104 
8105 	IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, arg3);
8106 }
8107 
8108 void
8109 idn_send_cmdresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
8110 			uint_t arg1, uint_t arg2, uint_t cerrno)
8111 {
8112 	idn_msgtype_t	mt;
8113 
8114 	ASSERT(IDN_DLOCK_IS_HELD(domid));
8115 
8116 	if (domid == idn.localid) {
8117 		/*
8118 	 * It's possible the local domain received a command
8119 	 * from itself.  However, we cannot send a normal
8120 	 * "ack" response (XDC) to ourselves.
8121 		 */
8122 		return;
8123 	}
8124 
8125 	mt.mt_mtype = IDNP_CMD | IDNP_ACK;
8126 	mt.mt_atype = 0;
8127 	mt.mt_cookie = mtp->mt_cookie;
8128 
8129 	IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, cerrno);
8130 }
8131 
8132 static void
8133 idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
8134 			idn_nack_t nacktype)
8135 {
8136 	idn_msgtype_t	mt;
8137 
8138 	if (domid == idn.localid)
8139 		return;
8140 
8141 	mt.mt_mtype = IDNP_CMD | IDNP_NACK;
8142 	mt.mt_atype = 0;
8143 	mt.mt_cookie = mtp->mt_cookie;
8144 
8145 	(void) IDNXDC(domid, &mt, (uint_t)cmdtype, (uint_t)nacktype, 0, 0);
8146 }
8147 
8148 void
8149 idn_broadcast_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
8150 {
8151 	idn_msgtype_t	mt;
8152 	domainset_t	domset;
8153 	procname_t	proc = "idn_broadcast_cmd";
8154 
8155 	IDN_GLOCK_SHARED();
8156 
8157 	domset = idn.domset.ds_connected;
8158 	DOMAINSET_DEL(domset, idn.localid);
8159 
8160 	PR_PROTO("%s: broadcasting command (%s) to domainset 0x%x\n",
8161 		proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
8162 		domset);
8163 
8164 	mt.mt_mtype = IDNP_CMD;
8165 	mt.mt_atype = 0;
8166 	mt.mt_cookie = 0;
8167 
8168 	IDNXDC_BROADCAST(domset, &mt, (uint_t)cmdtype, arg1, arg2, arg3);
8169 
8170 	IDN_GUNLOCK();
8171 	/*
8172 	 * This is a broadcast which means the local domain needs
8173 	 * to process it also.  Since we can't XDC to ourselves
8174 	 * we simply call a local function.
8175 	 */
8176 	idn_local_cmd(cmdtype, arg1, arg2, arg3);
8177 }
8178 
8179 /*
8180  * Since xargs[0] contains the cmdtype, only xargs[1], xargs[2], xargs[3]
8181  * are valid possible response arguments.
8182  */
8183 static void
8184 idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
8185 {
8186 	uint_t			msg = mtp->mt_mtype;
8187 	register idn_domain_t	*dp;
8188 	idn_cmd_t		cmdtype;
8189 	uint_t			acknack;
8190 	uint_t			cmdarg1, cmdarg2, cmdarg3;
8191 	int			islocal;
8192 	int			unsup_cmd_sent, unsup_cmd_recvd;
8193 	procname_t		proc = "idn_recv_cmd";
8194 
8195 	acknack = msg & IDNP_ACKNACK_MASK;
8196 	GET_XARGS(xargs, &cmdtype, &cmdarg1, &cmdarg2, &cmdarg3);
8197 
8198 	dp = &idn_domain[domid];
8199 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8200 
8201 	IDN_GLOCK_SHARED();
8202 
8203 	islocal = (domid == idn.localid);
8204 
8205 	ASSERT(!acknack || (acknack & IDNP_ACKNACK_MASK));
8206 
8207 	PR_PROTO("%s:%d: (local=%d) acknack=0x%x, cmdtype=%s(%d), "
8208 		"a1=0x%x, a2=0x%x, a3=0x%x\n",
8209 		proc, domid, islocal, acknack,
8210 		VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
8211 		cmdtype, cmdarg1, cmdarg2, cmdarg3);
8212 
8213 	unsup_cmd_sent = unsup_cmd_recvd = 0;
8214 
8215 	if ((IDN_GET_MASTERID() == IDN_NIL_DOMID) ||
8216 			(dp->dstate != IDNDS_CONNECTED)) {
8217 		/*
8218 		 * Commands cannot be handled without a valid
8219 		 * master.  If this is a request then nack him.
8220 		 */
8221 		PR_PROTO("%s:%d: cannot process CMD w/o master (%d, %s)\n",
8222 			proc, domid, IDN_GET_MASTERID(),
8223 			idnds_str[dp->dstate]);
8224 
8225 		if (!islocal && !(acknack & IDNP_ACKNACK_MASK))
8226 			idn_send_cmd_nackresp(domid, mtp, cmdtype,
8227 						IDNNACK_NOCONN);
8228 		IDN_GUNLOCK();
8229 		return;
8230 	}
8231 	IDN_GUNLOCK();
8232 
8233 	if (acknack & IDNP_ACKNACK_MASK) {
8234 		idn_nack_t	nack;
8235 		/*
8236 		 * Receiving a cmd+ack or cmd+nack in response to some
8237 		 * earlier command we must have issued.
8238 	 * If the response is a nack, there are two possibilities:
8239 		 *
8240 		 *	1. Remote domain failed to allocate due
8241 		 *	   to limited resources.
8242 		 *
8243 		 *	2. Remote domain does not support this
8244 		 *	   particular command.
8245 		 *
8246 		 * In the case of #2, the argument immediately after
8247 		 * the cmdtype (xargs[1]) will be (-1).
8248 		 */
8249 		nack = (idn_nack_t)cmdarg1;
8250 		if ((acknack & IDNP_NACK) && (nack == IDNNACK_BADCMD))
8251 			unsup_cmd_sent++;
8252 
8253 		if (islocal) {
8254 			/*
8255 			 * Shouldn't be receiving local commands w/acks.
8256 			 */
8257 			cmdtype = (idn_cmd_t)0;
8258 		}
8259 
8260 		switch (cmdtype) {
8261 		case IDNCMD_SLABALLOC:
8262 			idn_recv_slaballoc_resp(domid, cmdarg1, cmdarg2,
8263 						cmdarg3);
8264 			break;
8265 
8266 		case IDNCMD_SLABFREE:
8267 			idn_recv_slabfree_resp(domid, cmdarg1, cmdarg2,
8268 						cmdarg3);
8269 			break;
8270 
8271 		case IDNCMD_SLABREAP:
8272 			/*
8273 			 * We only care if successful.
8274 			 */
8275 			if (acknack & IDNP_ACK)
8276 				idn_recv_slabreap_resp(domid, cmdarg1,
8277 							cmdarg3);
8278 			break;
8279 
8280 		case IDNCMD_NODENAME:
8281 			if ((acknack & IDNP_NACK) == 0) {
8282 				idn_recv_nodename_resp(domid, cmdarg1,
8283 							cmdarg3);
8284 				break;
8285 			}
8286 			switch (nack) {
8287 			case IDNNACK_NOCONN:
8288 			case IDNNACK_RETRY:
8289 				/*
8290 				 * Remote domain was not quite
8291 				 * ready, try again.
8292 				 */
8293 				PR_PROTO("%s:%d: remote not ready "
8294 					"for %s - retrying "
8295 					"[dstate=%s]\n",
8296 					proc, domid,
8297 					idncmd_str[IDNCMD_NODENAME],
8298 					idnds_str[dp->dstate]);
8299 
8300 				if (dp->dstate == IDNDS_CONNECTED)
8301 					(void) timeout(idn_retry_nodename_req,
8302 					    (void *)(uintptr_t)domid, hz);
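				/*FALLTHROUGH*/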
8303 			default:
8304 				break;
8305 			}
8306 			break;
8307 
8308 		default:
8309 			/*
8310 			 * Unsupported command.
8311 			 */
8312 			unsup_cmd_recvd++;
8313 			break;
8314 		}
8315 		if (unsup_cmd_sent) {
8316 			PR_PROTO("%s:%d: unsupported command "
8317 				"requested (0x%x)\n",
8318 				proc, domid, cmdtype);
8319 		}
8320 		if (unsup_cmd_recvd) {
8321 			PR_PROTO("%s:%d: unsupported command "
8322 				"response (0x%x)\n",
8323 				proc, domid, cmdtype);
8324 		}
8325 	} else {
8326 		/*
8327 		 * Receiving a regular cmd from a remote domain.
8328 		 */
8329 		switch (cmdtype) {
8330 		case IDNCMD_SLABALLOC:
8331 			idn_recv_slaballoc_req(domid, mtp, cmdarg1);
8332 			break;
8333 
8334 		case IDNCMD_SLABFREE:
8335 			idn_recv_slabfree_req(domid, mtp, cmdarg1, cmdarg2);
8336 			break;
8337 
8338 		case IDNCMD_SLABREAP:
8339 			idn_recv_slabreap_req(domid, mtp, cmdarg1);
8340 			break;
8341 
8342 		case IDNCMD_NODENAME:
8343 			idn_recv_nodename_req(domid, mtp, cmdarg1);
8344 			break;
8345 
8346 		default:
8347 			/*
8348 			 * Unsupported command.
8349 			 */
8350 			unsup_cmd_recvd++;
8351 			break;
8352 		}
8353 		if (!islocal && unsup_cmd_recvd) {
8354 			/*
8355 			 * Received an unsupported IDN command.
8356 			 */
8357 			idn_send_cmd_nackresp(domid, mtp, cmdtype,
8358 					IDNNACK_BADCMD);
8359 		}
8360 	}
8361 }
8362 
8363 /*
8364  * This is a supporting routine for idn_broadcast_cmd() to
8365  * handle processing of the requested command for the local
8366  * domain.  Currently the only broadcast command
8367  * supported is reaping.
8368  */
8369 /*ARGSUSED2*/
8370 static void
8371 idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
8372 {
8373 	idn_protojob_t	*jp;
8374 	idn_domain_t	*ldp = &idn_domain[idn.localid];
8375 	procname_t	proc = "idn_local_cmd";
8376 
8377 	PR_PROTO("%s: submitting local command %s on domain %d\n",
8378 		proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
8379 		idn.localid);
8380 
8381 
8382 	jp = idn_protojob_alloc(KM_SLEEP);
8383 
8384 	jp->j_msg.m_domid    = ldp->domid;
8385 	jp->j_msg.m_msgtype  = IDNP_CMD;
8386 	jp->j_msg.m_cookie   = ldp->dcookie_recv;
8387 	SET_XARGS(jp->j_msg.m_xargs, cmdtype, arg1, arg2, arg3);
8388 
8389 	idn_protojob_submit(ldp->domid, jp);
8390 }
8391 
8392 /*
8393  * Terminate any outstanding commands that may have
8394  * been targeted for the given domain.  A command is
8395  * designated as outstanding if it has an active timer.
8396  *
8397  * serrno = ECANCELED.
8398  */
8399 static void
8400 idn_terminate_cmd(int domid, int serrno)
8401 {
8402 	idn_domain_t	*dp;
8403 	idn_timer_t	*tplist = NULL, *tp;
8404 	procname_t	proc = "idn_terminate_cmd";
8405 
8406 	dp = &idn_domain[domid];
8407 
8408 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8409 
8410 	IDN_MSGTIMER_GET(dp, IDNP_CMD, tplist, 0);
8411 	/*
8412 	 * At this point the timers are effectively terminated
8413 	 * since their t_onq indication has been set false.
8414 	 */
8415 	if (tplist == NULL) {
8416 		PR_PROTO("%s:%d: no outstanding cmds found\n",
8417 			proc, domid);
8418 		/*
8419 		 * There is a window where we may have caught a
8420 		 * request just prior to issuing the actual
8421 	 * command (SLABALLOC).  We're guaranteed that if there
8422 	 * was one, he will have at least registered.
8423 		 * So, if we abort the command now, he'll catch
8424 		 * it before going to sleep.
8425 		 * Drop through.
8426 		 */
8427 	}
8428 	ASSERT(tplist ? (tplist->t_back->t_forw == NULL) : 1);
8429 
8430 	for (tp = tplist; tp; tp = tp->t_forw) {
8431 		ASSERT(tp->t_type == IDNP_CMD);
8432 
8433 		PR_PROTO("%s:%d: found outstanding cmd: %s\n",
8434 			proc, domid, idncmd_str[tp->t_subtype]);
8435 
8436 		switch (tp->t_subtype) {
8437 		case IDNCMD_SLABALLOC:
8438 			/*
8439 			 * Outstanding slaballoc request may have
8440 			 * slab waiters hanging around.  Need to
8441 			 * tell them to bail out.  The given domain
8442 			 * must be the master if we have an outstanding
8443 			 * command to him.  This also presumes that
8444 			 * if there are any waiters they're only in
8445 			 * the local domain's waiting area (i.e. we're
8446 			 * a slave).
8447 			 */
8448 #ifdef DEBUG
8449 			IDN_GLOCK_SHARED();
8450 			ASSERT(domid == IDN_GET_MASTERID());
8451 			ASSERT(idn.localid != IDN_GET_MASTERID());
8452 			IDN_GUNLOCK();
8453 #endif /* DEBUG */
8454 			(void) smr_slabwaiter_abort(idn.localid, serrno);
8455 			break;
8456 
8457 		case IDNCMD_SLABFREE:
8458 		case IDNCMD_SLABREAP:
8459 		case IDNCMD_NODENAME:
8460 			/*
8461 			 * Nothing really waiting for these operations
8462 			 * so no biggy if we just drop.
8463 			 * Note that NODENAME may have an outstanding
8464 			 * buffer, however that will be reclaimed
8465 			 * when we actually unlink from domain.
8466 			 */
8467 			break;
8468 
8469 		default:
8470 			ASSERT(0);
8471 			break;
8472 		}
8473 	}
8474 	/*
8475 	 * As mentioned before the timers are effectively no-op'd
8476 	 * once they're dequeued, however let's clean house and
8477 	 * get rid of the useless entries in the timeout queue.
8478 	 */
8479 	if (tplist) {
8480 		IDN_TIMER_STOPALL(tplist);
8481 	}
8482 
8483 	if (idn_domain[idn.localid].dvote.v.master) {
8484 		/*
8485 		 * I'm the master so it's possible I had
8486 		 * outstanding commands (SLABALLOC) waiting
8487 		 * to be satisfied for the given domain.
8488 		 * Since we're forcing an error it's okay
8489 		 * to continue holding onto the drwlock.
8490 		 */
8491 		PR_PROTO("%s:%d: abort slaballoc waiters\n", proc, domid);
8492 		(void) smr_slabwaiter_abort(domid, serrno);
8493 
8494 	} else if (dp->dvote.v.master) {
8495 		PR_PROTO("%s:%d: abort (local domain) slaballoc waiters\n",
8496 			proc, domid);
8497 		(void) smr_slabwaiter_abort(idn.localid, serrno);
8498 	}
8499 }
8500 
8501 static void
8502 idn_send_acknack(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
8503 {
8504 	idn_domain_t	*dp = &idn_domain[domid];
8505 	procname_t	proc = "idn_send_acknack";
8506 
8507 	ASSERT(mtp ? (mtp->mt_mtype & IDNP_ACKNACK_MASK) : 1);
8508 	ASSERT(domid != IDN_NIL_DOMID);
8509 
8510 #ifdef DEBUG
8511 	{
8512 		STRING(mstr);
8513 		STRING(astr);
8514 
8515 		INUM2STR(mtp->mt_mtype, mstr);
8516 		INUM2STR(mtp->mt_atype, astr);
8517 
8518 		if (mtp->mt_mtype & IDNP_ACK) {
8519 			PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
8520 				"a1=0x%x, a2=0x%x, a3=0x%x, a4 = 0x%x\n",
8521 				proc, domid, idnds_str[dp->dstate],
8522 				astr, mstr, xargs[0], xargs[1],
8523 				xargs[2], xargs[3]);
8524 		} else {
8525 			idn_nack_t	nack;
8526 
8527 			nack = GET_XARGS_NACK_TYPE(xargs);
8528 			PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
8529 				"nack=%s(0x%x)\n",
8530 				proc, domid, idnds_str[dp->dstate],
8531 				astr, mstr, idnnack_str[nack],
8532 				(uint_t)nack);
8533 		}
8534 	}
8535 #endif /* DEBUG */
8536 
8537 	(void) IDNXDC(domid, mtp, xargs[0], xargs[1], xargs[2], xargs[3]);
8538 }
8539 
8540 /*ARGSUSED0*/
8541 static void
8542 idn_prealloc_slab(int nslabs)
8543 {
8544 	register int	s, serrno;
8545 	smr_slab_t	*sp;
8546 	idn_domain_t	*ldp = &idn_domain[idn.localid];
8547 	procname_t	proc = "idn_prealloc_slab";
8548 
8549 	IDN_GLOCK_SHARED();
8550 	DSLAB_LOCK_SHARED(idn.localid);
8551 	if ((idn.state != IDNGS_ONLINE) || (ldp->dnslabs > 0)) {
8552 		/*
8553 		 * Not in the proper state or slab already allocated.
8554 		 */
8555 		DSLAB_UNLOCK(idn.localid);
8556 		IDN_GUNLOCK();
8557 		return;
8558 	}
8559 	IDN_GUNLOCK();
8560 	ASSERT(!ldp->dslab);
8561 
8562 	serrno = 0;
8563 	for (s = 0; (s < nslabs) && ((int)ldp->dnslabs < nslabs); s++) {
8564 		/*
8565 		 * Returns with ldp->drwlock dropped.
8566 		 */
8567 		serrno = smr_slab_alloc(idn.localid, &sp);
8568 		if (serrno != 0) {
8569 			PR_PROTO("%s: FAILED to pre-alloc "
8570 				"slab (serrno = %d)\n", proc, serrno);
8571 			break;
8572 		}
8573 		/*
8574 		 * State may have changed since smr_slab_alloc
8575 		 * temporarily drops drwlock.  Make sure we're
8576 		 * still connected.
8577 		 */
8578 		PR_PROTO("%s: SUCCESSFULLY pre-alloc'd slab\n", proc);
8579 
8580 		if (idn.state != IDNGS_ONLINE) {
8581 			PR_PROTO("%s: Lost connection..leaving\n", proc);
8582 			break;
8583 		}
8584 	}
8585 
8586 	DSLAB_UNLOCK(idn.localid);
8587 }
8588 
8589 /*
8590  * Received a request from a remote domain to
8591  * allocate a slab from the master SMR for him.
8592  * Allocate slab and return the response.
8593  */
8594 static void
8595 idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp, uint_t slab_size)
8596 {
8597 	register idn_domain_t	*dp;
8598 	procname_t		proc = "idn_recv_slaballoc_req";
8599 
8600 	PR_PROTO("%s: slaballoc req from domain %d (size=0x%x)\n",
8601 		proc, domid, slab_size);
8602 
8603 	dp = &idn_domain[domid];
8604 
8605 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8606 
8607 	IDN_GLOCK_SHARED();
8608 
8609 	if (idn.localid != IDN_GET_MASTERID()) {
8610 		IDN_GUNLOCK();
8611 		/*
8612 		 * It's a fatal error if the remote domain thinks
8613 		 * we're the master.
8614 		 */
8615 		idn_send_slaballoc_resp(domid, mtp, 0, 0, EACCES);
8616 
8617 	} else if (dp->dstate != IDNDS_CONNECTED) {
8618 
8619 		IDN_GUNLOCK();
8620 		/*
8621 		 * It's a fatal error if we don't yet have a
8622 		 * connection established with the requestor.
8623 		 */
8624 		idn_send_slaballoc_resp(domid, mtp, 0, 0, ENOLINK);
8625 	} else {
8626 		int		serrno;
8627 		smr_slab_t	*sp;
8628 		smr_offset_t	slab_offset;
8629 
8630 		IDN_GUNLOCK();
8631 		DSLAB_LOCK_SHARED(domid);
8632 		IDN_DUNLOCK(domid);
8633 		/*
8634 		 * We're connected and we're the master.
8635 		 * smr_slab_alloc() returns with dp->drwlock dropped.
8636 		 */
8637 		if ((serrno = smr_slab_alloc(domid, &sp)) == 0) {
8638 			/*
8639 			 * Successfully allocated slab for remote slave.
8640 			 */
8641 			slab_offset = IDN_ADDR2OFFSET(sp->sl_start);
8642 			slab_size   = sp->sl_end - sp->sl_start;
8643 			ASSERT((slab_offset != 0) && (slab_size != 0));
8644 		} else {
8645 			slab_offset = slab_size = 0;
8646 		}
8647 		DSLAB_UNLOCK(domid);
8648 		/*
8649 		 * The drwlock is dropped during smr_slab_alloc.
8650 		 * During that time our connection with the given
8651 		 * domain may have changed.  Better check again.
8652 		 */
8653 		IDN_DLOCK_SHARED(domid);
8654 		if ((dp->dstate != IDNDS_CONNECTED) && !serrno) {
8655 			/*
8656 			 * Connection broke.  Keep the slab here.
8657 			 */
8658 			DSLAB_LOCK_EXCL(domid);
8659 			IDN_DUNLOCK(domid);
8660 			smr_slab_free(domid, sp);
8661 			DSLAB_UNLOCK(domid);
8662 			slab_offset = slab_size = 0;
8663 			serrno = ECANCELED;
8664 			IDN_DLOCK_SHARED(domid);
8665 		}
8666 		/*
8667 		 * Send response.
8668 		 * Note that smr_slab_alloc automatically installs
8669 		 * slab into domains respective idn_domain entry
8670 		 * to be associated with that domain.
8671 		 */
8672 		idn_send_slaballoc_resp(domid, mtp,
8673 					slab_offset, slab_size, serrno);
8674 	}
8675 }
8676 
8677 static void
8678 idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp,
8679 			smr_offset_t slab_offset, uint_t slab_size, int serrno)
8680 {
8681 	procname_t	proc = "idn_send_slaballoc_resp";
8682 
8683 	PR_PROTO("%s: slaballoc resp to domain %d (off=0x%x, size=0x%x) "
8684 		"[serrno = %d]\n",
8685 		proc, domid, slab_offset, slab_size, serrno);
8686 
8687 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABALLOC,
8688 				slab_offset, slab_size, serrno);
8689 }
8690 
8691 /*
8692  * Received the ack or nack to a previous allocation request
8693  * made by the local domain to the master for a slab.  Need
8694  * to "put" the response into the waiting area for any
8695  * waiters.
8696  */
8697 static void
8698 idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
8699 			uint_t slab_size, int serrno)
8700 {
8701 	smr_slab_t		*sp = NULL;
8702 	int			rv;
8703 	procname_t		proc = "idn_recv_slaballoc_resp";
8704 
8705 
8706 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8707 
8708 	PR_PROTO("%s: slaballoc resp from domain %d (off=0x%x, size=0x%x) "
8709 		"[serrno = %d]\n",
8710 		proc, domid, slab_offset, slab_size, serrno);
8711 
8712 	if (!serrno) {
8713 		IDN_GLOCK_SHARED();
8714 		if (domid != IDN_GET_MASTERID()) {
8715 			/*
8716 			 * We should only be receiving responses from
8717 			 * our master.  This is either a bogus message
8718 			 * or an old response.  In either case dump it.
8719 			 */
8720 			PR_PROTO("%s: BOGUS slaballoc resp from domid %d "
8721 				"(master = %d)\n",
8722 				proc, domid, IDN_GET_MASTERID());
8723 			serrno = EPROTO;
8724 		}
8725 		IDN_GUNLOCK();
8726 
8727 		if (!serrno &&
8728 			!VALID_NWROFFSET(slab_offset, IDN_SMR_BUFSIZE)) {
8729 
8730 			PR_PROTO("%s: slab offset (0x%x) out of range "
8731 				"(0-0x%lx)\n",
8732 				proc, slab_offset, MB2B(IDN_NWR_SIZE));
8733 			serrno = EPROTO;
8734 		} else if (!serrno) {
8735 			sp = GETSTRUCT(smr_slab_t, 1);
8736 			sp->sl_start = IDN_OFFSET2ADDR(slab_offset);
8737 			sp->sl_end   = sp->sl_start + slab_size;
8738 			smr_alloc_buflist(sp);
8739 		}
8740 	}
8741 
8742 	/*
8743 	 * Always "put" slabs back to yourself since you're a slave.
8744 	 * Note that we set the forceflag so that even if there are
8745 	 * no waiters we still install the slab for the domain.
8746 	 */
8747 	if (!serrno) {
8748 		DSLAB_LOCK_EXCL(idn.localid);
8749 	}
8750 	rv = smr_slaballoc_put(idn.localid, sp, 1, serrno);
8751 	if (!serrno) {
8752 		DSLAB_UNLOCK(idn.localid);
8753 	}
8754 
8755 	if (rv < 0) {
8756 		/*
8757 		 * Some kind of error trying to install response.
8758 		 * If there was a valid slab sent to us, we'll
8759 		 * just have to send it back.
8760 		 */
8761 		PR_PROTO("%s: failed to install response in waiting area\n",
8762 			proc);
8763 		if (slab_size != 0) {
8764 			PR_PROTO("%s: sending slab back to domain %d "
8765 				"(master = %d)\n",
8766 				proc, domid, IDN_GET_MASTERID());
8767 			idn_send_cmd(domid, IDNCMD_SLABFREE,
8768 						slab_offset, slab_size, 0);
8769 		}
8770 		if (sp) {
8771 			smr_free_buflist(sp);
8772 			FREESTRUCT(sp, smr_slab_t, 1);
8773 		}
8774 	}
8775 }
8776 
8777 /*
8778  * Note that slab reaping is effectively performed asynchronously
8779  * since the request will be received by a protocol server.
8780  */
8781 static void
8782 idn_recv_slabreap_req(int domid, idn_msgtype_t *mtp, int nslabs)
8783 {
8784 	procname_t	proc = "idn_recv_slabreap_req";
8785 
8786 	PR_PROTO("%s: slab reap request (nslabs = %d)\n", proc, nslabs);
8787 
8788 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8789 
8790 	IDN_GLOCK_SHARED();
8791 	if (domid != IDN_GET_MASTERID()) {
8792 		/*
8793 		 * Only the master can request that slabs be reaped.
8794 		 */
8795 		IDN_GUNLOCK();
8796 		PR_PROTO("%s: only master can request slab reaping\n", proc);
8797 
8798 		idn_send_cmdresp(domid, mtp, IDNCMD_SLABREAP, 0, 0, EACCES);
8799 
8800 		return;
8801 	}
8802 	IDN_GUNLOCK();
8803 
8804 	if (nslabs != 0) {
8805 		IDN_DUNLOCK(domid);
8806 		smr_slab_reap(idn.localid, &nslabs);
8807 		IDN_DLOCK_SHARED(domid);
8808 	}
8809 
8810 	PR_PROTO("%s: slab reap result (nslabs = %d)\n", proc, nslabs);
8811 
8812 	/*
8813 	 * Go ahead and send the reap response back before we start
8814 	 * free'ing off the individual slabs.
8815 	 */
8816 	idn_send_slabreap_resp(domid, mtp, nslabs, 0);
8817 }
8818 
8819 static void
8820 idn_recv_slabreap_resp(int domid, int nslabs, int serrno)
8821 {
8822 	procname_t	proc = "idn_recv_slabreap_resp";
8823 
8824 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8825 
8826 	if ((idn.localid != IDN_GET_MASTERID()) || (idn.localid == domid)) {
8827 		PR_PROTO("%s: unexpected slabreap resp received "
8828 			"(domid = %d)\n", proc, domid);
8829 		ASSERT(0);
8830 		return;
8831 	}
8832 	PR_PROTO("%s: recvd reap response from domain %d for %d slabs "
8833 		"[serrno = %d]\n", proc, domid, nslabs, serrno);
8834 }
8835 
8836 /*
8837  * Not really necessary to send slabreap response.
8838  * XXX - perhaps useful to master for accounting or
8839  *	 throttling of further reaping?
8840  */
8841 static void
8842 idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp, int nslabs, int serrno)
8843 {
8844 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABREAP, nslabs, 0, serrno);
8845 }
8846 
8847 /*
8848  * Slave -> Master ONLY
8849  * Master never sends slabfree request to itself.
8850  */
8851 static void
8852 idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
8853 			smr_offset_t slab_offset, uint_t slab_size)
8854 {
8855 	smr_slab_t	*sp;
8856 	int		serrno;
8857 	caddr_t		s_start, s_end;
8858 	procname_t	proc = "idn_recv_slabfree_req";
8859 
8860 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8861 
8862 	if (domid == IDN_GET_MASTERID()) {
8863 		PR_PROTO("%s: unexpected slabfree req received (domid = %d)\n",
8864 			proc, domid);
8865 		idn_send_slabfree_resp(domid, mtp,
8866 					slab_offset, slab_size, EACCES);
8867 		return;
8868 	}
8869 	if (slab_size > IDN_SLAB_SIZE) {
8870 		PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
8871 			proc, IDN_SLAB_SIZE, slab_size);
8872 		idn_send_slabfree_resp(domid, mtp,
8873 					slab_offset, slab_size, EINVAL);
8874 		return;
8875 	}
8876 	s_start = IDN_OFFSET2ADDR(slab_offset);
8877 	s_end   = s_start + slab_size;
8878 	/*
8879 	 * Master has received a SLABFREE request (effectively a response
8880 	 * to some earlier SLABREAP request).
8881 	 * Find the slab associated with this range and free it up.
8882 	 */
8883 	DSLAB_LOCK_EXCL(domid);
8884 	if ((sp = smr_slaballoc_get(domid, s_start, s_end)) != NULL) {
8885 		smr_slab_free(domid, sp);
8886 		serrno = 0;
8887 	} else {
8888 		serrno = EINVAL;
8889 	}
8890 	DSLAB_UNLOCK(domid);
8891 
8892 	idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size, serrno);
8893 }
8894 
8895 /*
8896  * Master -> Slave ONLY
8897  */
8898 static void
8899 idn_recv_slabfree_resp(int domid, uint_t slab_offset,
8900 			uint_t slab_size, int serrno)
8901 {
8902 	procname_t	proc = "idn_recv_slabfree_resp";
8903 
8904 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8905 
8906 	if (domid != IDN_GET_MASTERID()) {
8907 		PR_PROTO("%s: unexpected slabfree resp received (domid = %d)\n",
8908 			proc, domid);
8909 		ASSERT(0);
8910 		return;
8911 	}
8912 	if (slab_size > IDN_SLAB_SIZE) {
8913 		PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
8914 			proc, IDN_SLAB_SIZE, slab_size);
8915 		ASSERT(0);
8916 		return;
8917 	}
8918 	PR_PROTO("%s: recvd free resp from dom %d "
8919 		"- slab (off/size) 0x%x/0x%x [serrno = %d]\n",
8920 		proc, domid, slab_offset, slab_size, serrno);
8921 }
8922 
8923 static void
8924 idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp,
8925 			uint_t slab_offset, uint_t slab_size, int serrno)
8926 {
8927 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABFREE,
8928 				slab_offset, slab_size, serrno);
8929 }
8930 
8931 static void
8932 idn_retry_nodename_req(void *arg)
8933 {
8934 	int	domid = (int)(uintptr_t)arg;
8935 
8936 	idn_send_nodename_req(domid);
8937 }
8938 
8939 static void
8940 idn_send_nodename_req(int domid)
8941 {
8942 	caddr_t		b_bufp;
8943 	smr_offset_t	bufoffset;
8944 	int		serrno;
8945 	idn_domain_t	*dp = &idn_domain[domid];
8946 	procname_t	proc = "idn_send_nodename_req";
8947 
8948 	/*
8949 	 * Need to drop domain lock across
8950 	 * SMR allocation.
8951 	 */
8952 	serrno = smr_buf_alloc(domid, MAXDNAME+1, &b_bufp);
8953 
8954 	IDN_DLOCK_SHARED(domid);
8955 	if (dp->dstate != IDNDS_CONNECTED) {
8956 		/*
8957 		 * Lost connection.
8958 		 */
8959 		PR_PROTO("%s:%d: connection lost [dstate = %s]\n",
8960 			proc, domid, idnds_str[dp->dstate]);
8961 		IDN_DUNLOCK(domid);
8962 		if (!serrno)
8963 			(void) smr_buf_free(domid, b_bufp, MAXDNAME+1);
8964 		return;
8965 	}
8966 	if (serrno) {
8967 		/*
8968 		 * Failed to allocate buffer, but still have
8969 		 * connection so keep trying.  We may have queried
8970 		 * the master a little too early.
8971 		 */
8972 		PR_PROTO("%s:%d: buffer alloc failed [dstate = %s]\n",
8973 			proc, domid, idnds_str[dp->dstate]);
8974 		(void) timeout(idn_retry_nodename_req, (void *)(uintptr_t)domid,
8975 		    hz);
8976 		IDN_DUNLOCK(domid);
8977 		return;
8978 	}
8979 
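	/*
	 * Nodename exchange convention: the first byte of the SMR
	 * buffer carries the usable length; the responder writes the
	 * NUL-terminated nodename starting at the following byte.
	 */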
8980 	*b_bufp = (char)MAXDNAME;
8981 	bufoffset = IDN_ADDR2OFFSET(b_bufp);
8982 
8983 	idn_send_cmd(domid, IDNCMD_NODENAME, bufoffset, 0, 0);
8984 	IDN_DUNLOCK(domid);
8985 }
8986 
8987 static void
8988 idn_send_nodename_resp(int domid, idn_msgtype_t *mtp,
8989 			smr_offset_t bufoffset, int serrno)
8990 {
8991 	idn_send_cmdresp(domid, mtp, IDNCMD_NODENAME,
8992 			(uint_t)bufoffset, 0, serrno);
8993 }
8994 
8995 static void
8996 idn_recv_nodename_req(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset)
8997 {
8998 	caddr_t		b_bufp;
8999 	int		length;
9000 	idn_domain_t	*ldp = &idn_domain[idn.localid];
9001 	procname_t	proc = "idn_recv_nodename_req";
9002 
9003 	IDN_DLOCK_EXCL(idn.localid);
9004 	if (!strlen(ldp->dname)) {
9005 		if (!strlen(utsname.nodename)) {
9006 			/*
9007 			 * Local domain's nodename hasn't been
9008 			 * set yet.
9009 			 */
9010 			IDN_DUNLOCK(idn.localid);
9011 			idn_send_cmd_nackresp(domid, mtp, IDNCMD_NODENAME,
9012 						IDNNACK_RETRY);
9013 			return;
9014 		}
9015 		strncpy(ldp->dname, utsname.nodename, MAXDNAME - 1);
9016 	}
9017 	IDN_DLOCK_DOWNGRADE(idn.localid);
9018 
9019 	if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
9020 		PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
9021 			proc, domid, bufoffset);
9022 		IDN_DUNLOCK(idn.localid);
9023 		idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
9024 		return;
9025 	}
9026 
9027 	b_bufp = IDN_OFFSET2ADDR(bufoffset);
9028 	length = (int)(*b_bufp++ & 0xff);
9029 
9030 	if (length < strlen(ldp->dname)) {
9031 		PR_PROTO("%s:%d: buffer not big enough (req %lu, got %d)\n",
9032 			proc, domid, strlen(ldp->dname), length);
9033 		IDN_DUNLOCK(idn.localid);
9034 		idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
9035 		return;
9036 	}
9037 
9038 	strncpy(b_bufp, ldp->dname, MAXDNAME);
9039 	b_bufp[MAXDNAME-1] = 0;
9040 	IDN_DUNLOCK(idn.localid);
9041 
9042 	idn_send_nodename_resp(domid, mtp, bufoffset, 0);
9043 }
9044 
9045 static void
9046 idn_recv_nodename_resp(int domid, smr_offset_t bufoffset, int serrno)
9047 {
9048 	caddr_t		b_bufp;
9049 	idn_domain_t	*dp = &idn_domain[domid];
9050 	procname_t	proc = "idn_recv_nodename_resp";
9051 
9052 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
9053 
9054 	if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
9055 		PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
9056 			proc, domid, bufoffset);
9057 		return;
9058 	}
9059 
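	/*
	 * Translate the SMR offset up front so the nodename buffer
	 * can be freed below even when the response carried an error.
	 */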
9060 	b_bufp = IDN_OFFSET2ADDR(bufoffset) + 1;
9061 	if (serrno == 0) {
9062 		b_bufp[MAXDNAME-1] = 0;
9063 
9064 		if (strlen(b_bufp) > 0) {
9065 			strncpy(dp->dname, b_bufp, MAXDNAME);
9066 			PR_PROTO("%s:%d: received nodename(%s)\n",
9067 				proc, domid, dp->dname);
9068 		}
9069 	}
9070 
9071 	(void) smr_buf_free(domid, b_bufp - 1, MAXDNAME + 1);
9072 }
9073 
9074 /*
9075  * The master allocates the SMR management structures.
9076  */
9077 static int
9078 idn_master_init()
9079 {
9080 	idn_domain_t	*ldp = &idn_domain[idn.localid];
9081 	size_t		reserved_size = 0;
9082 	caddr_t		reserved_area = NULL;
9083 	procname_t	proc = "idn_master_init";
9084 
9085 	ASSERT(IDN_GLOCK_IS_EXCL());
9086 	ASSERT(IDN_DLOCK_IS_EXCL(idn.localid));
9087 
9088 	if (idn.mboxarea != NULL) {
9089 		PR_PROTO("%s: master data already initialized\n", proc);
9090 		return (0);
9091 	}
9092 
9093 	PR_PROTO("%s: initializing master data (domid = %d)\n",
9094 		proc, idn.localid);
9095 
9096 	/*
9097 	 * Reserve an area of the SMR for mailbox usage.
9098 	 * This area is allocated to other domains via
9099 	 * the master.  Round it up to IDN_SMR_BUFSIZE multiple.
9100 	 * the master.  Round it up to an IDN_SMR_BUFSIZE multiple.
9101 	reserved_size = IDNROUNDUP(IDN_MBOXAREA_SIZE, IDN_SMR_BUFSIZE);
9102 
9103 	PR_PROTO("%s: reserving %lu bytes for mailbox area\n",
9104 		proc, reserved_size);
9105 
9106 #ifdef DEBUG
9107 	if (reserved_size > (size_t)IDN_SLAB_SIZE) {
9108 		PR_PROTO("%s: WARNING mbox area (%ld) > slab size (%d)\n",
9109 			proc, reserved_size, IDN_SLAB_SIZE);
9110 	}
9111 #endif /* DEBUG */
9112 	/*
9113 	 * Initialize the pool of slabs and SMR I/O buffers.
9114 	 */
9115 	if (smr_slabpool_init(reserved_size, &reserved_area) != 0) {
9116 		idn_master_deinit();
9117 		return (-1);
9118 	}
9119 
9120 	ASSERT(idn.mboxarea == NULL);
9121 	ASSERT(reserved_area);
9122 
9123 	bzero(reserved_area, reserved_size);
9124 
9125 	idn.mboxarea = (idn_mboxtbl_t *)reserved_area;
9126 	ldp->dmbox.m_tbl = IDN_MBOXAREA_BASE(idn.mboxarea, idn.localid);
9127 	/*
9128 	 * Initialize the SMR pointers in the entire
9129 	 * mailbox table.
9130 	 */
9131 	idn_mboxarea_init(idn.mboxarea, IDN_MBOXAREA_SIZE / IDN_MBOXTBL_SIZE);
9132 
9133 	return (0);
9134 }
9135 
9136 static void
9137 idn_master_deinit()
9138 {
9139 	idn_domain_t	*ldp;
9140 	smr_slab_t	*sp;
9141 	procname_t	proc = "idn_master_deinit";
9142 
9143 	ASSERT(IDN_GLOCK_IS_EXCL());
9144 	ASSERT(IDN_DLOCK_IS_EXCL(idn.localid));
9145 
9146 	if (idn.mboxarea == NULL) {
9147 		PR_PROTO("%s: master data already deinitialized\n", proc);
9148 		return;
9149 	}
9150 
9151 	ldp = &idn_domain[idn.localid];
9152 
9153 	PR_PROTO("%s: deinitializing master data (domid = %d)\n",
9154 		proc, idn.localid);
9155 
9156 	ldp->dmbox.m_tbl = NULL;
9157 	idn.mboxarea = NULL;
9158 	/*
9159 	 * Master may still be holding onto slabs of his own.
9160 	 */
9161 	DSLAB_LOCK_EXCL(idn.localid);
9162 	sp = ldp->dslab;
9163 	ldp->dslab = NULL;
9164 	ldp->dnslabs = 0;
9165 	if (sp)
9166 		smr_slab_free(idn.localid, sp);
9167 	ldp->dslab_state = DSLAB_STATE_UNKNOWN;
9168 	DSLAB_UNLOCK(idn.localid);
9169 
9170 	smr_slabpool_deinit();
9171 }
9172 
9173 static int
9174 idn_mark_awol(int domid, clock_t *atime)
9175 {
9176 	clock_t		awol;
9177 	idn_domain_t	*dp = &idn_domain[domid];
9178 
9179 	ASSERT(IDN_SYNC_IS_LOCKED());
9180 	ASSERT(IDN_GLOCK_IS_EXCL());
9181 
9182 	if (!DOMAIN_IN_SET(idn.domset.ds_awol, domid)) {
9183 		DOMAINSET_ADD(idn.domset.ds_awol, domid);
9184 		idn.nawols++;
9185 	}
9186 	awol = lbolt;
9187 	if (dp->dawol.a_count++ == 0)
9188 		dp->dawol.a_time = awol;
9189 	dp->dawol.a_last = awol;
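	/*
	 * Rate-limit AWOL warnings: return a non-zero timestamp (which
	 * the caller uses to gate the console message) only if
	 * idn_awolmsg_interval seconds have elapsed since the last one.
	 */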
9190 	if ((awol - dp->dawol.a_msg) >= (clock_t)(idn_awolmsg_interval * hz))
9191 		dp->dawol.a_msg = awol;
9192 	else
9193 		awol = 0;
9194 
9195 	*atime = awol;
9196 
9197 	idn_awol_event_set(dp->dhw.dh_boardset);
9198 
9199 	return (dp->dawol.a_count);
9200 }
9201 
9202 void
9203 idn_clear_awol(int domid)
9204 {
9205 	idn_domain_t	*dp = &idn_domain[domid];
9206 
9207 	ASSERT(IDN_SYNC_IS_LOCKED());
9208 	ASSERT(IDN_GLOCK_IS_EXCL());
9209 	if (DOMAIN_IN_SET(idn.domset.ds_awol, domid)) {
9210 		DOMAINSET_DEL(idn.domset.ds_awol, domid);
9211 		idn.nawols--;
9212 	}
9213 	if (dp->dawol.a_count > 0) {
9214 		dp->dawol.a_count = 0;
9215 		dp->dawol.a_last = dp->dawol.a_time;
9216 		dp->dawol.a_time = 0;
9217 		dp->dawol.a_msg = 0;
9218 
9219 		idn_awol_event_clear(dp->dhw.dh_boardset);
9220 	}
9221 }
9222 
9223 /*
9224  * A timer expired.
9225  */
9226 void
9227 idn_timer_expired(void *arg)
9228 {
9229 	idn_domain_t	*dp;
9230 	char		*op = "UNKNOWN";
9231 	clock_t		awol = 0;
9232 	int		awolcount, dcpu, domid;
9233 	idn_timer_t	*tp = (idn_timer_t *)arg;
9234 	idn_timerq_t	*tq = NULL;
9235 	uint_t		token;
9236 	char		dname[MAXDNAME];
9237 	procname_t	proc = "idn_timer_expired";
9238 	STRING(str);
9239 
9240 	tq = tp->t_q;
9241 
9242 	ASSERT(tp->t_domid != IDN_NIL_DOMID);
9243 
9244 	IDN_TIMERQ_LOCK(tq);
9245 
9246 	INUM2STR(tp->t_type, str);
9247 
9248 	if (tp->t_onq == 0) {
9249 		PR_TIMER("%s: timer CAUGHT TERMINATION (type = %s)\n",
9250 			proc, str);
9251 		/*
9252 		 * Timer was dequeued.  Somebody is trying
9253 		 * to shut it down.
9254 		 */
9255 		IDN_TIMERQ_UNLOCK(tq);
9256 		return;
9257 	}
9258 
9259 	IDN_TIMER_DEQUEUE(tq, tp);
9260 
9261 	IDN_TIMERQ_UNLOCK(tq);
9262 
9263 	IDN_SYNC_LOCK();
9264 	IDN_DLOCK_EXCL(tp->t_domid);
9265 
9266 	domid = tp->t_domid;
9267 
9268 	dp = &idn_domain[domid];
9269 	strcpy(dname, dp->dname);
9270 	dcpu = dp->dcpu;
9271 
9272 	IDN_TIMER_EXEC(tp);
9273 
9274 #ifdef DEBUG
9275 	PR_TIMER("%s:%d: [%s] timer EXPIRED (C=0x%x, P=0x%llx, X=0x%llx)\n",
9276 		proc, tp->t_domid, str, tp->t_cookie,
9277 		tp->t_posttime, tp->t_exectime);
9278 #endif /* DEBUG */
9279 
9280 	/*
9281 	 * IMPORTANT:
9282 	 * Each case is responsible for dropping SYNC_LOCK & DLOCK.
9283 	 */
9284 	switch (tp->t_type) {
9285 	case IDNP_DATA:
9286 		IDN_SYNC_UNLOCK();
9287 		/*
9288 		 * Timed out waiting for a data packet response.
9289 		 * We can't close domain since he may just be
9290 		 * temporarily AWOL.
9291 		 * Note that dio and diocheck do not get cleared.
9292 		 * This is taken care of when the domain restarts
9293 		 * or is fatally closed.
9294 		 * We only need a reader lock for this.
9295 		 */
9296 		IDN_DLOCK_DOWNGRADE(domid);
9297 		if (dp->diocheck && dp->dmbox.m_send) {
9298 			(void) idn_reclaim_mboxdata(domid, 0, -1);
9299 			if (dp->dio >= IDN_WINDOW_EMAX) {
9300 				idn_msgtype_t	mt;
9301 				/*
9302 				 * Restart timer for another
9303 				 * go around.
9304 				 */
9305 				IDN_MSGTIMER_START(domid, IDNP_DATA, 0,
9306 						idn_msg_waittime[IDNP_DATA],
9307 						&mt.mt_cookie);
9308 			} else {
9309 				lock_clear(&dp->diocheck);
9310 			}
9311 		}
9312 		IDN_DUNLOCK(domid);
9313 		break;
9314 
9315 	case IDNP_NEGO:
9316 		/*
9317 		 * If we're not in a NEGO transition, then
9318 		 * just ignore this timeout.
9319 		 */
9320 		if (dp->dxp == &xphase_nego) {
9321 			uint_t		token;
9322 
9323 			IDN_GLOCK_EXCL();
9324 			op = "CONNECT";
9325 			awolcount = idn_mark_awol(domid, &awol);
9326 			IDN_GUNLOCK();
9327 
9328 			idn_nego_cleanup_check(domid, IDN_NIL_DOMID,
9329 						IDN_NIL_DCPU);
9330 
9331 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
9332 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
9333 			idn_retry_submit(idn_retry_nego, NULL, token,
9334 					idn_msg_retrytime[(int)IDNRETRY_NEGO]);
9335 		}
9336 		IDN_DUNLOCK(domid);
9337 		IDN_SYNC_UNLOCK();
9338 		break;
9339 
9340 	case IDNP_CMD:
9341 		/*
9342 		 * Timeouts on commands typically mean that the
9343 		 * master is not responding.  Furthermore, we
9344 		 * can't FORCE a FIN disconnect since at this stage
9345 		 * we are CONNECTED and thus other domains may
9346 		 * have cache entries that we're sharing with them.
9347 		 * Only choice is to completely disconnect from
9348 		 * IDN and try to reestablish connection.
9349 		 *
9350 		 * However, timeouts attempting to get nodename
9351 		 * are not fatal, although we don't want to retry
9352 		 * either since each timeout is a lost buffer to
9353 		 * the remote domain.
9354 		 */
9355 		if (tp->t_subtype == (ushort_t)IDNCMD_NODENAME) {
9356 			PR_PROTO("%s:%d: timedout waiting for nodename\n",
9357 				proc, domid);
9358 			IDN_DUNLOCK(domid);
9359 			IDN_SYNC_UNLOCK();
9360 			break;
9361 		}
9362 
9363 		IDN_GLOCK_EXCL();
9364 		if (idn.state == IDNGS_ONLINE) {
9365 			domainset_t	domset;
9366 			int		masterid = IDN_GET_MASTERID();
9367 
9368 			IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
9369 						gk_reconfig_last);
9370 
9371 			PR_PROTO("%s:%d: RECONFIG trying old masterid = %d\n",
9372 				proc, domid, masterid);
9373 
9374 			IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
9375 			IDN_SET_NEW_MASTERID(masterid);
9376 			IDN_GUNLOCK();
9377 			IDN_DUNLOCK(domid);
9378 
9379 			domset = idn.domset.ds_trans_on |
9380 					idn.domset.ds_connected;
9381 
9382 			idn_unlink_domainset(domset, IDNFIN_NORMAL,
9383 						IDNFIN_ARG_NONE,
9384 						IDNFIN_OPT_RELINK,
9385 						BOARDSET_ALL);
9386 		} else {
9387 			IDN_GUNLOCK();
9388 			IDN_DUNLOCK(domid);
9389 		}
9390 		IDN_SYNC_UNLOCK();
9391 		break;
9392 
9393 	case IDNP_CON:
9394 		if (tp->t_subtype == (ushort_t)IDNCON_QUERY) {
9395 			/*
9396 			 * Timed out sending a CON-query.  This is
9397 			 * non-fatal.  We simply need to retry.
9398 			 */
9399 			IDN_GLOCK_EXCL();
9400 			op = "CONNECT";
9401 			awolcount = idn_mark_awol(domid, &awol);
9402 			IDN_GUNLOCK();
9403 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
9404 			idn_retry_submit(idn_retry_query, NULL, token,
9405 					idn_msg_retrytime[(int)IDNRETRY_CONQ]);
9406 			IDN_DUNLOCK(domid);
9407 			IDN_SYNC_UNLOCK();
9408 			break;
9409 		}
9410 		/*FALLTHROUGH*/
9411 	case IDNP_CFG:
9412 		/*
9413 		 * Any timeouts here we simply try to disconnect
9414 		 * and reestablish the link.  Since we haven't
9415 		 * reached the connected state w.r.t. this domain
9416 		 * we put his fin state to FORCE-HARD in order
9417 		 * to shoot right through without involving other
9418 		 * domains.  Recall that other domains may have
9419 		 * established connections with the given domain
9420 		 * which means any FIN queries to them will always
9421 		 * return connected to the given domain.  Since
9422 		 * neither the given domain nor the local domain
9423 		 * plan on disconnecting from the IDN the connection
9424 		 * to the other domains will remain thereby preventing
9425 		 * the local FIN from ever completing.  Recall that
9426 		 * a FIN depends on all member domains FIN'ing also.
9427 		 */
9428 		IDN_GLOCK_EXCL();
9429 		op = "CONNECT";
9430 		awolcount = idn_mark_awol(domid, &awol);
9431 		IDN_GUNLOCK();
9432 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
9433 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
9434 				idn.domset.ds_relink);
9435 		idn_disconnect(domid, IDNFIN_FORCE_SOFT,
9436 				IDNFIN_ARG_NONE, IDNFIN_SYNC_NO);
9437 		IDN_DUNLOCK(domid);
9438 		IDN_SYNC_UNLOCK();
9439 		break;
9440 
9441 	case IDNP_FIN:
9442 		/*
9443 		 * On timeouts here we simply retry.
9444 		 */
9445 		IDN_GLOCK_EXCL();
9446 		op = "DISCONNECT";
9447 		awolcount = idn_mark_awol(domid, &awol);
9448 		IDN_GUNLOCK();
9449 		if (tp->t_subtype == (ushort_t)IDNFIN_QUERY) {
9450 			int		d;
9451 			domainset_t	rdyset;
9452 			/*
9453 			 * Timed out sending a FIN-query.  This is
9454 			 * non-fatal.  We simply need to retry.
9455 			 * If we were doing a forced unlink of any
9456 			 * domains, we don't want this awol guy
9457 			 * to hold us up.  Look for any forced
9458 			 * unlinks and make them "ready" with
9459 			 * respect to this awol domain.
9460 			 */
9461 			rdyset = 0;
9462 			for (d = 0; d < MAX_DOMAINS; d++) {
9463 				if (FIN_IS_FORCE(idn_domain[d].dfin)) {
9464 					DOMAINSET_ADD(rdyset, d);
9465 				}
9466 			}
9467 			if (rdyset)
9468 				(void) idn_sync_register(domid,
9469 						IDNSYNC_DISCONNECT,
9470 						rdyset, IDNSYNC_REG_REG);
9471 
9472 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
9473 			idn_retry_submit(idn_retry_query, NULL, token,
9474 					idn_msg_retrytime[(int)IDNRETRY_FINQ]);
9475 			IDN_DUNLOCK(domid);
9476 			IDN_SYNC_UNLOCK();
9477 			break;
9478 		}
9479 
9480 		if (dp->dfin == IDNFIN_FORCE_SOFT) {
9481 			IDN_FSTATE_TRANSITION(dp, IDNFIN_FORCE_HARD);
9482 		}
9483 		/*
9484 		 * Anybody that was waiting on this domain and
9485 		 * had a hard-force in action gets this guy for
9486 		 * free in their base ready-set.
9487 		 */
9488 		idn_sync_register_awol(domid);
9489 
9490 		dp->dxp = &xphase_fin;
9491 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
9492 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
9493 		idn_retry_submit(idn_retry_fin, NULL, token,
9494 				idn_msg_retrytime[(int)IDNRETRY_FIN]);
9495 		IDN_DUNLOCK(domid);
9496 		IDN_SYNC_UNLOCK();
9497 		break;
9498 
9499 	default:
9500 
9501 		ASSERT(0);
9502 		IDN_DUNLOCK(domid);
9503 		IDN_SYNC_UNLOCK();
9504 		break;
9505 	}
9506 
9507 	IDN_TIMER_FREE(tp);
9508 
9509 	if (awol) {
9510 		if (strlen(dname) > 0) {
9511 			cmn_err(CE_WARN,
9512 				"IDN: 236: domain (%s) [ID %d] not "
9513 				"responding to %s [#%d]",
9514 				dname, domid, op, awolcount);
9515 		} else {
9516 			cmn_err(CE_WARN,
9517 				"IDN: 236: domain [ID %d, CPU %d] not "
9518 				"responding to %s [#%d]",
9519 				domid, dcpu, op, awolcount);
9520 		}
9521 	}
9522 }
9523 
9524 #if 0
9525 static int
9526 idn_retry_check(uint_t token)
9527 {
9528 	int			i, count = 0;
9529 	int			domid = IDN_RETRY_TOKEN2DOMID(token);
9530 	int			key = IDN_RETRY_TOKEN2TYPE(token);
9531 	idn_retry_job_t		*rp;
9532 	idn_retry_queue_t	*qp;
9533 
9534 	qp = &idn.retryqueue;
9535 
9536 	mutex_enter(&qp->rq_mutex);
9537 
9538 	for (i = 0, rp = qp->rq_jobs; i < qp->rq_count; i++, rp = rp->rj_next)
9539 		if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
9540 				((key == IDN_RETRY_TYPEALL) ||
9541 				(rp->rj_token == token)))
9542 			count++;
9543 
9544 	mutex_exit(&qp->rq_mutex);
9545 
9546 	return (count);
9547 }
9548 #endif /* 0 */
9549 
9550 static void
9551 idn_retry_execute(void *arg)
9552 {
9553 	idn_retry_job_t		*rp = (idn_retry_job_t *)arg;
9554 	idn_retry_queue_t	*qp;
9555 
9556 	qp = &idn.retryqueue;
9557 
9558 	mutex_enter(&qp->rq_mutex);
9559 	if (rp->rj_onq == 0) {
9560 		/*
9561 		 * Job has already been claimed by
9562 		 * retry termination routine.
9563 		 * Bail out.
9564 		 */
9565 		mutex_exit(&qp->rq_mutex);
9566 		return;
9567 	}
9568 	rp->rj_next->rj_prev = rp->rj_prev;
9569 	rp->rj_prev->rj_next = rp->rj_next;
9570 	if (--(qp->rq_count) == 0)
9571 		qp->rq_jobs = NULL;
9572 	else if (qp->rq_jobs == rp)
9573 		qp->rq_jobs = rp->rj_next;
9574 	mutex_exit(&qp->rq_mutex);
9575 
9576 	(*rp->rj_func)(rp->rj_token, rp->rj_arg);
9577 
9578 	IDNRETRY_FREEJOB(rp);
9579 }
9580 
9581 /*
9582  * Submit a retry job; a token that is already queued is not re-submitted.
9583  */
9584 static void
9585 idn_retry_submit(void (*func)(uint_t token, void *arg),
9586 		void *arg, uint_t token, clock_t ticks)
9587 {
9588 	idn_retry_job_t		*rp, *cp;
9589 	idn_retry_queue_t	*qp;
9590 	int			c;
9591 	procname_t		proc = "idn_retry_submit";
9592 
9593 	if (ticks < 0) {
9594 		PR_PROTO("%s: (token = 0x%x) WARNING ticks = %ld\n",
9595 			proc, token, ticks);
9596 		return;
9597 	}
9598 	if (ticks == 0)		/* At least one tick to get into background */
9599 		ticks++;
9600 
9601 	PR_PROTO("%s: token = 0x%x\n", proc, token);
9602 
9603 	qp = &idn.retryqueue;
9604 
9605 	mutex_enter(&qp->rq_mutex);
9606 	for (c = 0, cp = qp->rq_jobs;
9607 			c < qp->rq_count;
9608 			cp = cp->rj_next, c++) {
9609 		if (cp->rj_token == token) {
9610 			PR_PROTO("%s: token = (%d,0x%x) already present\n",
9611 				proc, IDN_RETRY_TOKEN2DOMID(token),
9612 				IDN_RETRY_TOKEN2TYPE(token));
9613 			break;
9614 		}
9615 	}
9616 
9617 	if (c < qp->rq_count) {
9618 		mutex_exit(&qp->rq_mutex);
9619 		return;
9620 	}
9621 
9622 	rp = IDNRETRY_ALLOCJOB();
9623 	rp->rj_func = func;
9624 	rp->rj_arg = arg;
9625 	rp->rj_token = token;
9626 	rp->rj_prev = rp->rj_next = rp;
9627 
9628 	if (qp->rq_jobs == NULL) {
9629 		qp->rq_jobs = rp;
9630 	} else {
9631 		rp->rj_next = qp->rq_jobs;
9632 		rp->rj_prev = qp->rq_jobs->rj_prev;
9633 		rp->rj_next->rj_prev = rp;
9634 		rp->rj_prev->rj_next = rp;
9635 	}
9636 	rp->rj_onq = 1;
9637 	qp->rq_count++;
9638 	rp->rj_id = timeout(idn_retry_execute, (caddr_t)rp, ticks);
9639 	mutex_exit(&qp->rq_mutex);
9640 }
9641 
9642 int
9643 idn_retry_terminate(uint_t token)
9644 {
9645 	int			i, domid;
9646 	uint_t			key, count;
9647 	idn_retry_job_t		*rp, *nrp, *fp;
9648 	idn_retry_queue_t	*qp;
9649 	procname_t		proc = "idn_retry_terminate";
9650 
9651 	key = IDN_RETRY_TOKEN2TYPE(token);
9652 	domid = IDN_RETRY_TOKEN2DOMID(token);
9653 	fp = NULL;
9654 	qp = &idn.retryqueue;
9655 
9656 	mutex_enter(&qp->rq_mutex);
9657 	for (i = count = 0, rp = qp->rq_jobs; i < qp->rq_count; i++) {
9658 		nrp = rp->rj_next;
9659 		if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
9660 			((key == IDN_RETRY_TYPEALL) ||
9661 				(rp->rj_token == token))) {
9662 			/*
9663 			 * Turn off onq field as a signal to
9664 			 * the execution routine that this
9665 			 * retry has been terminated.  This
9666 			 * is necessary since we can't untimeout
9667 			 * while holding the rq_mutex otherwise
9668 			 * we'll deadlock with the execution
9669 			 * routine.  We'll untimeout these guys
9670 			 * _after_ we drop rq_mutex.
9671 			 */
9672 			rp->rj_onq = 0;
9673 			rp->rj_next->rj_prev = rp->rj_prev;
9674 			rp->rj_prev->rj_next = rp->rj_next;
9675 			if (qp->rq_jobs == rp)
9676 				qp->rq_jobs = rp->rj_next;
9677 			rp->rj_next = fp;
9678 			fp = rp;
9679 			count++;
9680 		}
9681 		rp = nrp;
9682 	}
9683 
9684 	if ((qp->rq_count -= count) == 0)
9685 		qp->rq_jobs = NULL;
9686 
9687 	mutex_exit(&qp->rq_mutex);
9688 
9689 	PR_PROTO("%s: token = (%d,0x%x), dequeued = %d\n",
9690 		proc, domid, key, count);
9691 
9692 	for (; fp; fp = nrp) {
9693 		(void) untimeout(fp->rj_id);
9694 
9695 		nrp = fp->rj_next;
9696 		IDNRETRY_FREEJOB(fp);
9697 	}
9698 
9699 	return (count);
9700 }
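
/*
 * Illustrative sketch only (kept under #if 0, not compiled): shows how a
 * caller typically pairs idn_retry_submit() with idn_retry_terminate()
 * using a token built from the domain and retry type.  The function name
 * and call site below are hypothetical; actual usage is spread throughout
 * this file (e.g. the NEGO timeout handling above).
 */
#if 0
static void
idn_retry_usage_example(int domid)
{
	uint_t	token;

	/*
	 * Schedule a NEGO retry for this domain to fire after the
	 * configured retry interval.
	 */
	token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
	idn_retry_submit(idn_retry_nego, NULL, token,
			idn_msg_retrytime[(int)IDNRETRY_NEGO]);

	/*
	 * ...later, cancel it.  A token keyed with IDN_RETRY_TYPEALL
	 * would instead cancel every retry queued for the domain.
	 */
	(void) idn_retry_terminate(token);
}
#endif /* 0 */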
9701 
9702 /*
9703  * -----------------------------------------------------------------------
9704  * The sole purpose of the idn_protocol_server is to manage the IDN
9705  * protocols between the various domains.  These messages do _not_ go
9706  * through the regular streams queues since they are not dependent on
9707  * any user process or module necessarily having the IDN driver open.
9708  * There may be multiple instances of these servers to enhance performance
9709  * of domain management.  Each server is assigned a idn_protoqueue_t
9710  * from which to obtain the work they need to do.
9711  * -----------------------------------------------------------------------
9712  */
9713 int
9714 idn_protocol_init(int nservers)
9715 {
9716 	int		i;
9717 	idn_protojob_t	*jp;
9718 	register idn_protoqueue_t	*protoq;
9719 
9720 	if (nservers <= 0) {
9721 		cmn_err(CE_WARN,
9722 			"IDN: 237: invalid number (%d) of protocol servers",
9723 			nservers);
9724 		return (-1);
9725 	}
9726 
9727 	idn.protocol.p_jobpool = kmem_cache_create("idn_protocol_jobcache",
9728 						sizeof (idn_protojob_t),
9729 						0, NULL, NULL, NULL,
9730 						NULL, NULL, 0);
9731 	if (idn.protocol.p_jobpool == NULL) {
9732 		cmn_err(CE_WARN,
9733 			"IDN: 238: kmem_cache_create(jobcache) failed");
9734 		return (-1);
9735 	}
9736 
9737 	/*
9738 	 * Initialize static cache for protojob.
9739 	 */
9740 	mutex_init(&idn_protojob_cache_lock, NULL, MUTEX_DRIVER, NULL);
9741 	jp = &idn_protojob_cache[0];
9742 	for (i = 1; i < IDN_DMV_PENDING_MAX; jp = jp->j_next, i++) {
9743 		jp->j_cache = 1;
9744 		jp->j_next = &idn_protojob_cache[i];
9745 	}
9746 	jp->j_cache = 1;
9747 	jp->j_next = NULL;
9748 	idn_protojob_cache_list = &idn_protojob_cache[0];
9749 
9750 	/*
9751 	 * Init morgue semaphore.
9752 	 */
9753 	sema_init(&idn.protocol.p_morgue, 0, NULL, SEMA_DEFAULT, NULL);
9754 	/*
9755 	 * Alloc server queues.
9756 	 */
9757 	idn.protocol.p_serverq = GETSTRUCT(idn_protoqueue_t, nservers);
9758 
9759 	/*
9760 	 * Init server queues.
9761 	 */
9762 	protoq = idn.protocol.p_serverq;
9763 	for (i = 0; i < nservers; protoq++, i++) {
9764 		mutex_init(&protoq->q_mutex, NULL, MUTEX_DRIVER, NULL);
9765 		cv_init(&protoq->q_cv, NULL, CV_DEFAULT, NULL);
9766 		protoq->q_id	  = i;
9767 		protoq->q_joblist = NULL;
9768 		protoq->q_joblist_tail = NULL;
9769 		protoq->q_die	  = 0;
9770 		protoq->q_morgue  = &idn.protocol.p_morgue;
9771 		/*
9772 		 * Create protocol server thread.
9773 		 */
9774 		protoq->q_threadp = thread_create(NULL, 0,
9775 		    idn_protocol_server, (caddr_t)&i, sizeof (i), &p0,
9776 		    TS_RUN, maxclsyspri);
9777 	}
9778 	/*
9779 	 * The servers are kept in the p_server[] array, however
9780 	 * The servers are kept in the p_serverq[] array, however
9781 	 */
9782 	protoq = idn.protocol.p_serverq;
9783 	for (i = 0; i < (nservers - 1); protoq++, i++)
9784 		protoq->q_next = (protoq + 1);
9785 	protoq->q_next = NULL;
9786 
9787 	idn.nservers = nservers;
9788 
9789 	return (idn.nservers);
9790 }
9791 
9792 void
9793 idn_protocol_deinit()
9794 {
9795 	register int	i;
9796 	int		nservers;
9797 	register idn_protoqueue_t	*protoq;
9798 
9799 	nservers = idn.nservers;
9800 
9801 	if (nservers <= 0)
9802 		return;
9803 
9804 	/*
9805 	 * Make sure the servers are dead.
9806 	 */
9807 	idn_protocol_server_killall();
9808 	ASSERT(idn.nservers == 0);
9809 	/*
9810 	 * Destroy the mutexes.
9811 	 */
9812 	protoq = idn.protocol.p_serverq;
9813 	for (i = 0; i < nservers; protoq++, i++) {
9814 		mutex_destroy(&protoq->q_mutex);
9815 		cv_destroy(&protoq->q_cv);
9816 	}
9817 	/*
9818 	 * Free up the protoqueue memory.
9819 	 */
9820 	FREESTRUCT(idn.protocol.p_serverq, idn_protoqueue_t, nservers);
9821 	idn.protocol.p_serverq = NULL;
9822 	/*
9823 	 * Destroy the morgue semaphore.
9824 	 */
9825 	sema_destroy(&idn.protocol.p_morgue);
9826 
9827 	if (idn.protocol.p_jobpool) {
9828 		kmem_cache_destroy(idn.protocol.p_jobpool);
9829 		idn.protocol.p_jobpool = NULL;
9830 	}
9831 }
9832 
9833 static void
9834 idn_protocol_server(int *id)
9835 {
9836 	idn_protoqueue_t	*pq;
9837 	idn_protojob_t		*jl;
9838 	register idn_protojob_t	*jp;
9839 	procname_t		proc = "idn_protocol_server";
9840 
9841 	if (id == NULL) {
9842 		PR_PROTO("%s: id == NULL, thread exiting\n",
9843 			proc);
9844 		return;
9845 	}
9846 	ASSERT((*id >= 0) && (*id < idn_protocol_nservers));
9847 
9848 	pq = &idn.protocol.p_serverq[*id];
9849 
9850 	ASSERT(pq->q_id == *id);
9851 
9852 	PR_PROTO("%s: id %d starting up (pq = 0x%p)\n",
9853 		proc, pq->q_id, pq);
9854 
9855 	/*CONSTCOND*/
9856 	while (1) {
9857 		mutex_enter(&pq->q_mutex);
9858 
9859 		while (((jl = pq->q_joblist) == NULL) && !pq->q_die)
9860 			cv_wait(&pq->q_cv, &pq->q_mutex);
9861 
9862 		pq->q_joblist = pq->q_joblist_tail = NULL;
9863 
9864 		if (pq->q_die) {
9865 			/*
9866 			 * We've been killed.  Need to check in
9867 			 * at the morgue.
9868 			 */
9869 			pq->q_threadp = NULL;
9870 			mutex_exit(&pq->q_mutex);
9871 			PR_PROTO("%s: thread (%d) killed...bye bye\n",
9872 				proc, pq->q_id);
9873 			for (jp = jl; jp; jp = jl) {
9874 				jl = jp->j_next;
9875 				idn_protojob_free(jp);
9876 			}
9877 			sema_v(pq->q_morgue);
9878 			thread_exit();
9879 			/*NOTREACHED*/
9880 		}
9881 		mutex_exit(&pq->q_mutex);
9882 
9883 		/*
9884 		 * We can process the jobs asynchronously while more are
9885 		 * put on.
9886 		 */
9887 		for (jp = jl; jp; jp = jl) {
9888 			jl = jp->j_next;
9889 			idn_recv_proto(&(jp->j_msg));
9890 			idn_protojob_free(jp);
9891 		}
9892 	}
9893 }
9894 
9895 /*
9896  * Kill off all the protocol servers.
9897  */
9898 static void
9899 idn_protocol_server_killall()
9900 {
9901 	register idn_protoqueue_t	*pq;
9902 	int		i;
9903 	procname_t	proc = "idn_protocol_server_killall";
9904 
9905 	PR_PROTO("%s: killing off %d protocol servers\n",
9906 		proc, idn.nservers);
9907 
9908 	pq = idn.protocol.p_serverq;
9909 	for (i = 0; i < idn.nservers; pq++, i++) {
9910 		mutex_enter(&pq->q_mutex);
9911 		pq->q_die = 1;
9912 		cv_signal(&pq->q_cv);
9913 		mutex_exit(&pq->q_mutex);
9914 	}
9915 
9916 	while (idn.nservers > 0) {
9917 		sema_p(&idn.protocol.p_morgue);
9918 		idn.nservers--;
9919 	}
9920 }
9921 
9922 idn_protojob_t *
9923 idn_protojob_alloc(int kmflag)
9924 {
9925 	idn_protojob_t	*jp;
9926 
9927 	jp = kmem_cache_alloc(idn.protocol.p_jobpool, kmflag);
9928 	if (jp == NULL) {
9929 		mutex_enter(&idn_protojob_cache_lock);
9930 		if ((jp = idn_protojob_cache_list) != NULL)
9931 			idn_protojob_cache_list = jp->j_next;
9932 		mutex_exit(&idn_protojob_cache_lock);
9933 	} else {
9934 		jp->j_cache = 0;
9935 	}
9936 
9937 	return (jp);
9938 }
9939 
9940 static void
9941 idn_protojob_free(idn_protojob_t *jp)
9942 {
9943 	ASSERT(jp);
9944 
9945 	if (jp->j_cache) {
9946 		mutex_enter(&idn_protojob_cache_lock);
9947 		jp->j_next = idn_protojob_cache_list;
9948 		idn_protojob_cache_list = jp;
9949 		mutex_exit(&idn_protojob_cache_lock);
9950 	} else {
9951 		kmem_cache_free(idn.protocol.p_jobpool, (void *)jp);
9952 	}
9953 }
9954 
9955 void
9956 idn_protojob_submit(int cookie, idn_protojob_t *jp)
9957 {
9958 	idn_protoqueue_t	*pq;
9959 	int			serverid;
9960 	procname_t		proc = "idn_protojob_submit";
9961 	STRING(str);
9962 
9963 	if (jp == NULL)
9964 		return;
9965 
9966 	serverid = IDN_PROTOCOL_SERVER_HASH(cookie);
9967 
9968 	pq = &idn.protocol.p_serverq[serverid];
9969 
9970 	INUM2STR(jp->j_msg.m_msgtype, str);
9971 	PR_PROTO("%s: job (d=%d, m=0x%x, %s) submitted to "
9972 		"protocol server %d\n", proc, jp->j_msg.m_domid,
9973 		jp->j_msg.m_msgtype, str, serverid);
9974 
9975 	mutex_enter(&pq->q_mutex);
9976 	/*
9977 	 * Can't submit jobs to dying servers.
9978 	 */
9979 	if (!pq->q_die) {
9980 		if (pq->q_joblist_tail) {
9981 			pq->q_joblist_tail->j_next = jp;
9982 			pq->q_joblist_tail = jp;
9983 		} else {
9984 			pq->q_joblist = pq->q_joblist_tail = jp;
9985 		}
9986 		jp->j_next = NULL;
9987 		cv_signal(&pq->q_cv);
9988 	} else {
9989 		PR_PROTO("%s: protocol server dead.  freeing protojob\n",
9990 			proc);
9991 		idn_protojob_free(jp);
9992 	}
9993 	mutex_exit(&pq->q_mutex);
9994 }
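
/*
 * Illustrative sketch only (kept under #if 0, not compiled): the normal
 * protojob flow - allocate a job, falling back to the static cache when
 * kmem is exhausted, fill in the protocol message and hand it to a
 * protocol server selected by a hash cookie.  The function name, the use
 * of the domain id as the cookie and the partial message setup are
 * placeholders, not actual call sites.
 */
#if 0
static void
idn_protojob_usage_example(int domid)
{
	idn_protojob_t	*jp;

	if ((jp = idn_protojob_alloc(KM_NOSLEEP)) == NULL)
		return;		/* kmem and static cache both exhausted */

	jp->j_msg.m_domid = domid;	/* remaining msg fields omitted */

	/*
	 * Hash the job onto one of the protocol servers and wake it.
	 */
	idn_protojob_submit(domid, jp);
}
#endif /* 0 */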
9995 
9996 static void
9997 idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls)
9998 {
9999 	register int	d;
10000 	caddr_t		state_ptr = NULL, mtbasep = (caddr_t)mtp;
10001 	idn_mboxtbl_t	*amtp;
10002 	procname_t	proc = "idn_mboxarea_init";
10003 
10004 	ASSERT(mtp && (ntbls > 0));
10005 
10006 	PR_PROTO("%s: init mboxtbl (0x%p) ntbls = %d\n",
10007 		proc, mtp, ntbls);
10008 
10009 	for (d = 0; d < ntbls; d++) {
10010 		register int	pd, sd;
10011 		register int	ch;
10012 
10013 		mtp->mt_header.mh_svr_active = 0;
10014 		mtp->mt_header.mh_svr_ready = 0;
10015 		/*
10016 		 * Initialize the header of each mbox table
10017 		 * with a cookie for identity.
10018 		 */
10019 		/*
10020 		 * Format: 0xc0c0DSCC
10021 		 *	 D = primary domain
10022 		 *	 S = sub-domain of primary
10023 		 *	CC = channel of sub-domain.
10024 		 */
10025 		pd = (d / MAX_DOMAINS) / IDN_MAX_NETS;
10026 		sd = (d / IDN_MAX_NETS) % MAX_DOMAINS;
10027 		ch = d % IDN_MAX_NETS;
10028 
10029 		/*
10030 		 * We point all sub-domains in the same channel
10031 		 * to the same active sync flag since a single server
10032 		 * services all domains in the same channel.
10033 		 */
10034 		amtp = IDN_MBOXTBL_ABS_PTR(mtbasep, pd, 0, ch);
10035 
10036 		state_ptr = (caddr_t)&amtp->mt_header.mh_svr_active;
10037 		mtp->mt_header.mh_svr_active_ptr = IDN_ADDR2OFFSET(state_ptr);
10038 
10039 		state_ptr = (caddr_t)&amtp->mt_header.mh_svr_ready;
10040 		mtp->mt_header.mh_svr_ready_ptr = IDN_ADDR2OFFSET(state_ptr);
10041 
10042 		mtp->mt_header.mh_cookie = IDN_MAKE_MBOXHDR_COOKIE(pd, sd, ch);
10043 
10044 		mtp->mt_header.mh_cksum = IDN_CKSUM_MBOX(&mtp->mt_header);
10045 
10046 		IDN_MBOXTBL_PTR_INC(mtp);
10047 	}
10048 	/*
10049 	 * Now that the master has initialized the entire mailbox
10050 	 * region the referenced memory may not necessarily be up-to-date
10051 	 * region, the referenced memory may not necessarily be up-to-date
10052 	 * In order to make sure future connecting domains get a
10053 	 * consistent picture of the mailbox region, it's necessary
10054 	 * for the master to flush its caches.
10055 	 */
10056 	PR_PROTO("%s: flushing ecache's of local (master) domain\n", proc);
10057 
10058 	idnxf_flushall_ecache();
10059 }
10060 
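/*
 * Allocate and initialize the main (per-channel) mailbox structures for
 * the given domain.  One idn_mainmbox_t is created per net/channel; the
 * actual SMR mailbox (mm_smr_mboxp) is attached later when the SMR is
 * set up.  Caller must hold the domain lock.
 */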
10061 idn_mainmbox_t *
10062 idn_mainmbox_init(int domid, int mbx)
10063 {
10064 	idn_mainmbox_t	*mmp;
10065 	int		c;
10066 	idn_mainmbox_t	*cmp;
10067 	procname_t	proc = "idn_mainmbox_init";
10068 
10069 	ASSERT(idn_domain[domid].dcpu != IDN_NIL_DCPU);
10070 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10071 
10072 	PR_PROTO("%s: initializing main %s mailbox for domain %d\n",
10073 		proc, IDNMBOX_IS_RECV(mbx) ? "RECV" : "SEND", domid);
10074 
10075 	cmp = GETSTRUCT(idn_mainmbox_t, IDN_MAX_NETS);
10076 	for (c = 0; c < IDN_MAX_NETS; c++) {
10077 		mmp = &cmp[c];
10078 		mmp->mm_channel = (short)c;
10079 		mutex_init(&mmp->mm_mutex, NULL, MUTEX_DRIVER, NULL);
10080 		mmp->mm_domid = (short)domid;
10081 		mmp->mm_type = mbx;
10082 	}
10083 	mmp = cmp;
10084 	/*
10085 	 * The actual SMR mailbox (mmp->mm_smr_mboxp) gets set up
10086 	 * when the SMR is set up.
10087 	 */
10088 
10089 	return (mmp);
10090 }
10091 
10092 static void
10093 idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp)
10094 {
10095 	idn_mainmbox_t	*mmp;
10096 	int		c;
10097 	procname_t	proc = "idn_mainmbox_reset";
10098 
10099 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
10100 
10101 	PR_PROTO("%s: resetting main %s mailbox for domain %d\n",
10102 		proc, IDNMBOX_IS_RECV(cmp->mm_type) ? "RECV" : "SEND", domid);
10103 
10104 	for (c = 0; c < IDN_MAX_NETS; c++) {
10105 		mmp = &cmp[c];
10106 
10107 		mmp->mm_channel = (short)c;
10108 		mmp->mm_domid = (short)domid;
10109 		mmp->mm_count = 0;
10110 		mmp->mm_flags = 0;
10111 		mmp->mm_qiget = mmp->mm_qiput = 0;
10112 		mmp->mm_csp = NULL;
10113 		ASSERT(mmp->mm_type == cmp->mm_type);
10114 	}
10115 }
10116 
10117 void
10118 idn_mainmbox_deinit(int domid, idn_mainmbox_t *mmp)
10119 {
10120 	procname_t	proc = "idn_mainmbox_deinit";
10121 
10122 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10123 
10124 	PR_PROTO("%s: deinitializing main %s mailbox for domain %d\n",
10125 		proc, IDNMBOX_IS_RECV(mmp->mm_type) ? "RECV" : "SEND", domid);
10126 
10127 	ASSERT(idn_domain_is_registered(domid, -1, NULL) == 0);
10128 
10129 	FREESTRUCT(mmp, idn_mainmbox_t, IDN_MAX_NETS);
10130 }
10131 
10132 static void
10133 idn_mainmbox_activate(int domid)
10134 {
10135 	register int	c;
10136 	idn_domain_t	*dp = &idn_domain[domid];
10137 	procname_t	proc = "idn_mainmbox_activate";
10138 
10139 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10140 
10141 	PR_PROTO("%s:%d: activating main mailbox\n", proc, domid);
10142 
10143 	for (c = 0; c < IDN_MAX_NETS; c++)
10144 		idn_mainmbox_chan_register(domid, &dp->dmbox.m_send[c],
10145 						&dp->dmbox.m_recv[c], c);
10146 }
10147 
10148 /*
10149  * Called upon disabling the SMR to deactivate all the mailboxes
10150  * so that they no longer reference the SMR that's going away.
10151  *
10152  * domset - Domains to deactivate; (ushort_t)-1 stops all channel services.
10153  */
10154 static void
10155 idn_mainmbox_deactivate(ushort_t domset)
10156 {
10157 	int		svr_count;
10158 	procname_t	proc = "idn_mainmbox_deactivate";
10159 
10160 
10161 	if (domset == 0)
10162 		return;
10163 
10164 	PR_PROTO("%s: %s deactivating main mailboxes for domset 0x%x\n",
10165 		proc, (domset == (ushort_t)-1) ? "STOP-ALL" : "NORMAL", domset);
10166 
10167 	svr_count = idn_mainmbox_chan_unregister(domset, -1);
10168 
10169 	PR_PROTO("%s: deactivated %d chansvrs (domset 0x%x)\n",
10170 		proc, svr_count, domset);
10171 }
10172 
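/*
 * Register the given domain's send/receive main mailboxes with the
 * channel server of the specified channel and add the domain to that
 * server's monitoring set.  The channel itself may not be active yet.
 */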
10173 static void
10174 idn_mainmbox_chan_register(int domid, idn_mainmbox_t *send_mmp,
10175 					idn_mainmbox_t *recv_mmp, int channel)
10176 {
10177 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10178 
10179 	/*
10180 	 * Obtain receive mailbox lock first.
10181 	 */
10182 	mutex_enter(&recv_mmp->mm_mutex);
10183 	mutex_enter(&send_mmp->mm_mutex);
10184 
10185 	ASSERT(recv_mmp->mm_channel == (short)channel);
10186 	ASSERT(send_mmp->mm_channel == (short)channel);
10187 
10188 	recv_mmp->mm_csp = &idn.chan_servers[channel];
10189 	recv_mmp->mm_count = 0;
10190 	recv_mmp->mm_dropped = 0;
10191 	recv_mmp->mm_flags = 0;
10192 
10193 	send_mmp->mm_csp = &idn.chan_servers[channel];
10194 	send_mmp->mm_count = 0;
10195 	send_mmp->mm_dropped = 0;
10196 	send_mmp->mm_flags = 0;
10197 
10198 	mutex_exit(&send_mmp->mm_mutex);
10199 	mutex_exit(&recv_mmp->mm_mutex);
10200 
10201 	/*
10202 	 * We have to add ourselves to the respective
10203 	 * channel server's service table.
10204 	 * Note that the channel may not necessarily be
10205 	 * active at this time.
10206 	 */
10207 	ASSERT(idn.chan_servers);
10208 	/*
10209 	 * Have to get the channel server under
10210 	 * control so we can add ourselves.
10211 	 * Returns w/c_mutex.
10212 	 * Returns w/c_mutex held.
10213 	IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[channel]);
10214 	/*
10215 	 * Add the following domain (mailbox) for monitoring
10216 	 * by the respective channel server.
10217 	 */
10218 	idn_chan_addmbox(channel, DOMAINSET(domid));
10219 
10220 	IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[channel]);
10221 }
10222 
10223 /*
10224  * Unregister the given domain from the specified channel(s) for monitoring.
10225  */
10226 static int
10227 idn_mainmbox_chan_unregister(ushort_t domset, int channel)
10228 {
10229 	int		c, dd_count;
10230 	int		min_chan, max_chan;
10231 	procname_t	proc = "idn_mainmbox_chan_unregister";
10232 
10233 
10234 	PR_CHAN("%s: deactivating main mailboxes (channel %d) "
10235 		"for domset 0x%x\n", proc, channel, domset);
10236 
10237 	if (channel == -1) {
10238 		min_chan = 0;
10239 		max_chan = IDN_MAX_NETS - 1;
10240 	} else {
10241 		min_chan = max_chan = channel;
10242 	}
10243 	/*
10244 	 * Point all the data dispatchers to the same morgue
10245 	 * so we can kill them all at once.
10246 	 */
10247 	dd_count = 0;
10248 	for (c = min_chan; c <= max_chan; c++) {
10249 
10250 		/*
10251 		 * Have to get the channel server under
10252 		 * control so we can remove ourselves.
10253 		 * Returns w/c_mutex held.
10254 		 */
10255 		IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]);
10256 		/*
10257 		 * Delete the following domain (mailbox) from
10258 		 * monitoring by the respective channel server.
10259 		 */
10260 		idn_chan_delmbox(c, (ushort_t)domset);
10261 
10262 		IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
10263 		dd_count++;
10264 	}
10265 	PR_CHAN("%s: deactivated %d channel mboxes for domset 0x%x, chan %d\n",
10266 		proc, dd_count, domset, channel);
10267 	return (dd_count);
10268 }
10269 
10270 /*
10271  * Check if the given domain is registered with the given channel(s).
10272  */
10273 int
10274 idn_domain_is_registered(int domid, int channel, idn_chanset_t *chansetp)
10275 {
10276 	int		regcount;
10277 	int		c, min_chan, max_chan;
10278 	idn_chanset_t	chanset;
10279 	procname_t	proc = "idn_domain_is_registered";
10280 
10281 
10282 	CHANSET_ZERO(chanset);
10283 
10284 	if (idn.chan_servers == NULL) {
10285 		PR_CHAN("%s: idn.chan_servers == NULL!!\n", proc);
10286 		return (0);
10287 	}
10288 
10289 	if (channel == -1) {
10290 		min_chan = 0;
10291 		max_chan = IDN_MAX_NETS - 1;
10292 	} else {
10293 		min_chan = max_chan = channel;
10294 	}
10295 
10296 	regcount = 0;
10297 
10298 	for (c = min_chan; c <= max_chan; c++) {
10299 		idn_chansvr_t	*csp;
10300 
10301 		csp = &idn.chan_servers[c];
10302 		IDN_CHAN_LOCK_SEND(csp);
10303 		/*
10304 		 * Don't really need recv side lock since registration
10305 		 * can't change while we're holding send side.
10306 		 * No need to wait for send side to actually suspend
10307 		 * since all we want to do is prevent the registered
10308 		 * information from changing.
10309 		 */
10310 		if (IDN_CHAN_DOMAIN_IS_REGISTERED(csp, domid)) {
10311 			regcount++;
10312 			CHANSET_ADD(chanset, c);
10313 		}
10314 
10315 		IDN_CHAN_UNLOCK_SEND(csp);
10316 	}
10317 
10318 	PR_CHAN("%s: domid %d mbox reg'd with %d channels [0x%x] (req=%d)\n",
10319 		proc, domid, regcount, chanset, channel);
10320 
10321 	if (chansetp)
10322 		*chansetp = chanset;
10323 
10324 	return (regcount);
10325 }
10326 
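/*
 * Flush the given domain's main mailboxes across all channels, clearing
 * the owner bits of any outstanding entries and dropping our references
 * into SMR space.  If the domain is no longer registered with a channel,
 * or the mailbox header fails validation, the SMR is not touched for
 * that channel.  Returns the total number of mailbox entries flushed.
 */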
10327 static int
10328 idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
10329 {
10330 	register int		qi;
10331 	register idn_mboxmsg_t	*mqp;
10332 	int		total_count = 0;
10333 	int		c, count;
10334 	int		mbox_type;
10335 	char		*mbox_str;
10336 	int		lost_io, total_lost_io = 0;
10337 	idn_chanset_t	chanset;
10338 	procname_t	proc = "idn_mainmbox_flush";
10339 
10340 
10341 	if (mmp == NULL)
10342 		return (0);
10343 
10344 	CHANSET_ZERO(chanset);
10345 
10346 	mbox_type = mmp->mm_type;
10347 	ASSERT((mbox_type == IDNMMBOX_TYPE_SEND) ||
10348 			(mbox_type == IDNMMBOX_TYPE_RECV));
10349 
10350 	mbox_str = (mbox_type == IDNMMBOX_TYPE_SEND) ? "SEND" : "RECV";
10351 
10352 	/*
10353 	 * Determine which channels this domain is registered
10354 	 * with.  If he's not registered with any, then we
10355 	 * can't touch the SMR.
10356 	 */
10357 	(void) idn_domain_is_registered(domid, -1, &chanset);
10358 
10359 	for (c = 0; c < IDN_MAX_NETS; c++) {
10360 		ushort_t	mbox_csum;
10361 
10362 		if (mmp[c].mm_smr_mboxp == NULL)
10363 			continue;
10364 		mutex_enter(&mmp[c].mm_mutex);
10365 		ASSERT(mmp[c].mm_type == mbox_type);
10366 		if (CHAN_IN_SET(chanset, c) == 0) {
10367 			/*
10368 			 * Domain is no longer registered.
10369 			 * DON'T TOUCH THE SMR - IT'S POISON!
10370 			 */
10371 			if (mmp[c].mm_smr_mboxp) {
10372 				PR_CHAN("%s:%d:%s: domain unregistered "
10373 					"w/chan %d - DUMPING SMR reference\n",
10374 					proc, domid, mbox_str, c);
10375 				lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
10376 							mmp[c].mm_qiget);
10377 #ifdef DEBUG
10378 				if (mbox_type == IDNMMBOX_TYPE_RECV) {
10379 					PR_CHAN("%s:%d:%s: blowing away %d "
10380 						"incoming pkts\n",
10381 						proc, domid, mbox_str, lost_io);
10382 				} else {
10383 					PR_CHAN("%s:%d:%s: blowing away %d/%d "
10384 						"outstanding pkts\n",
10385 						proc, domid, mbox_str, lost_io,
10386 						idn_domain[domid].dio);
10387 				}
10388 #endif /* DEBUG */
10389 			}
10390 			mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10391 			mmp[c].mm_smr_mboxp = NULL;
10392 			total_lost_io += lost_io;
10393 		}
10394 		if (mmp[c].mm_smr_mboxp) {
10395 			mbox_csum =
10396 				IDN_CKSUM_MBOX(&mmp[c].mm_smr_mboxp->mt_header);
10397 			if (!VALID_NWRADDR(mmp[c].mm_smr_mboxp, 4) ||
10398 			    !VALID_MBOXHDR(&mmp[c].mm_smr_mboxp->mt_header,
10399 							c, mbox_csum)) {
10400 				lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
10401 							mmp[c].mm_qiget);
10402 #ifdef DEBUG
10403 				if (mbox_type == IDNMMBOX_TYPE_RECV) {
10404 					PR_CHAN("%s:%d:%s: bad mbox.  blowing "
10405 						"away %d incoming pkts\n",
10406 						proc, domid, mbox_str, lost_io);
10407 				} else {
10408 					PR_CHAN("%s:%d:%s: bad mbox.  blowing "
10409 						"away %d/%d outstanding pkts\n",
10410 						proc, domid, mbox_str, lost_io,
10411 						idn_domain[domid].dio);
10412 				}
10413 #endif /* DEBUG */
10414 				mmp[c].mm_smr_mboxp = NULL;
10415 				mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10416 				total_lost_io += lost_io;
10417 			}
10418 		}
10419 		if (mmp[c].mm_smr_mboxp == NULL) {
10420 			mutex_exit(&mmp[c].mm_mutex);
10421 			continue;
10422 		}
10423 		mqp = &mmp[c].mm_smr_mboxp->mt_queue[0];
10424 		qi = 0;
10425 		count = 0;
10426 		/*
10427 		 * It's quite possible the remote domain may be accessing
10428 		 * these mailbox entries at the exact same time we're
10429 		 * clearing the owner bit.  That's okay.  All we're trying
10430 		 * to do at this point is to minimize the number of packets
10431 		 * the remote domain might try to process unnecessarily.
10432 		 */
10433 		do {
10434 			if (mqp[qi].ms_owner)
10435 				count++;
10436 			mqp[qi].ms_owner = 0;
10437 			IDN_MMBOXINDEX_INC(qi);
10438 		} while (qi);
10439 
10440 		lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput, mmp[c].mm_qiget);
10441 		total_lost_io += lost_io;
10442 
10443 		mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10444 		mmp[c].mm_smr_mboxp = NULL;
10445 		mutex_exit(&mmp[c].mm_mutex);
10446 
10447 		total_count += count;
10448 
10449 		PR_CHAN("%s:%d:%s: flushed out %d mbox entries for chan %d\n",
10450 			proc, domid, mbox_str, count, c);
10451 	}
10452 
10453 	if (total_lost_io && (mbox_type == IDNMMBOX_TYPE_SEND)) {
10454 		int	lost_bufs;
10455 		/*
10456 		 * If we lost any outstanding I/O we could now have
10457 		 * slabs with buffers mistakenly still marked as
10458 		 * outstanding.  Need to clean up the leftovers
10459 		 * ourselves here.
10460 		 */
10461 		lost_bufs = smr_buf_free_all(domid);
10462 
10463 		PR_CHAN("%s:%d:%s: flushed %d/%d buffers from slabs\n",
10464 			proc, domid, mbox_str, lost_bufs, total_lost_io);
10465 	}
10466 
10467 	PR_CHAN("%s:%d:%s: flushed total of %d mailbox entries (lost %d)\n",
10468 		proc, domid, mbox_str, total_count, total_lost_io);
10469 
10470 	return (total_count);
10471 }
10472 
10473 void
10474 idn_chanserver_bind(int net, int cpuid)
10475 {
10476 	int		ocpuid;
10477 	cpu_t		*cp;
10478 	idn_chansvr_t	*csp;
10479 	kthread_id_t	tp;
10480 	procname_t	proc = "idn_chanserver_bind";
10481 
10482 	csp = &idn.chan_servers[net];
10483 	IDN_CHAN_LOCK_GLOBAL(csp);
10484 
10485 	mutex_enter(&cpu_lock);		/* protect checking cpu_ready_set */
10486 	ocpuid = csp->ch_bound_cpuid;
10487 	cp = cpu_get(cpuid);
10488 	if ((cpuid != -1) && ((cp == NULL) || !cpu_is_online(cp))) {
10489 		mutex_exit(&cpu_lock);
10490 		cmn_err(CE_WARN,
10491 			"IDN: 239: invalid CPU ID (%d) specified for "
10492 			"IDN net %d",
10493 			cpuid, net);
10494 		IDN_CHAN_UNLOCK_GLOBAL(csp);
10495 		return;
10496 	}
10497 	if ((tp = csp->ch_recv_threadp) == NULL) {
10498 		/*
10499 		 * Thread is not yet active.  Set ch_bound_cpuid
10500 		 * so when thread activates it will automatically
10501 		 * bind itself.
10502 		 */
10503 		csp->ch_bound_cpuid = -1;
10504 		csp->ch_bound_cpuid_pending = cpuid;
10505 	} else {
10506 		if (ocpuid != -1) {
10507 			thread_affinity_clear(tp);
10508 			csp->ch_bound_cpuid = -1;
10509 		}
10510 		if (cpuid >= 0) {
10511 			thread_affinity_set(tp, cpuid);
10512 			csp->ch_bound_cpuid = cpuid;
10513 		}
10514 		csp->ch_bound_cpuid_pending = -1;
10515 	}
10516 	mutex_exit(&cpu_lock);
10517 
10518 	PR_CHAN("%s: bound net/channel (%d) from cpuid %d to%scpuid %d\n",
10519 		proc, net, ocpuid, tp ? " " : " (pending) ", cpuid);
10520 
10521 	IDN_CHAN_UNLOCK_GLOBAL(csp);
10522 }
10523 
10524 #ifdef DEBUG
10525 static idn_mboxhdr_t	*prev_mhp[IDN_MAXMAX_NETS];
10526 #endif /* DEBUG */
10527 /*
10528  * Get access to the respective channel server's synchronization
10529  * header which resides in SMR space.
10530  */
10531 static idn_mboxhdr_t *
10532 idn_chan_server_syncheader(int channel)
10533 {
10534 	idn_domain_t	*ldp = &idn_domain[idn.localid];
10535 	idn_mboxtbl_t	*mtp;
10536 	idn_mboxhdr_t	*mhp;
10537 	ushort_t		mbox_csum;
10538 	procname_t	proc = "idn_chan_server_syncheader";
10539 
10540 	ASSERT(IDN_CHAN_RECV_IS_LOCKED(&idn.chan_servers[channel]));
10541 
10542 	IDN_DLOCK_SHARED(idn.localid);
10543 
10544 	if (ldp->dmbox.m_tbl == NULL) {
10545 		PR_CHAN("%s: local dmbox.m_tbl == NULL\n", proc);
10546 		IDN_DUNLOCK(idn.localid);
10547 		return (NULL);
10548 	}
10549 
10550 	mtp = IDN_MBOXTBL_PTR_CHAN(ldp->dmbox.m_tbl, channel);
10551 	mhp = &mtp->mt_header;
10552 	mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
10553 
10554 #ifdef DEBUG
10555 	if (mhp != prev_mhp[channel]) {
10556 		prev_mhp[channel] = mhp;
10557 		PR_CHAN("%s: chan_server (%d) cookie = 0x%x (exp 0x%x)\n",
10558 			proc, channel, IDN_GET_MBOXHDR_COOKIE(mhp),
10559 			IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel));
10560 		PR_CHAN("%s: chan_server (%d) actv_ptr = 0x%x (exp 0x%x)\n",
10561 			proc, channel, mhp->mh_svr_active_ptr,
10562 			IDN_ADDR2OFFSET(&mhp->mh_svr_active));
10563 		PR_CHAN("%s: chan_server (%d) ready_ptr = 0x%x (exp 0x%x)\n",
10564 			proc, channel, mhp->mh_svr_ready_ptr,
10565 			IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
10566 		PR_CHAN("%s: chan_server (%d) mbox_cksum = 0x%x (exp 0x%x)\n",
10567 			proc, channel, (int)mhp->mh_cksum, (int)mbox_csum);
10568 	}
10569 #endif /* DEBUG */
10570 
10571 	if ((IDN_ADDR2OFFSET(&mhp->mh_svr_active) !=
10572 					mhp->mh_svr_active_ptr) ||
10573 			(IDN_ADDR2OFFSET(&mhp->mh_svr_ready) !=
10574 					mhp->mh_svr_ready_ptr) ||
10575 			!VALID_MBOXHDR(mhp, channel, mbox_csum)) {
10576 		idn_chansvr_t	*csp;
10577 
10578 		csp = &idn.chan_servers[channel];
10579 		if (IDN_CHANNEL_IS_RECV_CORRUPTED(csp) == 0) {
10580 			IDN_CHANSVC_MARK_RECV_CORRUPTED(csp);
10581 
10582 			cmn_err(CE_WARN,
10583 				"IDN: 240: (channel %d) SMR CORRUPTED "
10584 				"- RELINK", channel);
10585 			cmn_err(CE_CONT,
10586 				"IDN: 240: (channel %d) cookie "
10587 				"(expected 0x%x, actual 0x%x)\n",
10588 				channel,
10589 				IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel),
10590 				mhp->mh_cookie);
10591 			cmn_err(CE_CONT,
10592 				"IDN: 240: (channel %d) actv_flg "
10593 				"(expected 0x%x, actual 0x%x)\n",
10594 				channel, mhp->mh_svr_active_ptr,
10595 				IDN_ADDR2OFFSET(&mhp->mh_svr_active));
10596 			cmn_err(CE_CONT,
10597 				"IDN: 240: (channel %d) ready_flg "
10598 				"(expected 0x%x, actual 0x%x)\n",
10599 				channel, mhp->mh_svr_ready_ptr,
10600 				IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
10601 		}
10602 
10603 		mhp = NULL;
10604 	}
10605 	IDN_DUNLOCK(idn.localid);
10606 
10607 	PR_CHAN("%s: channel(%d) mainhp = 0x%p\n", proc, channel, mhp);
10608 
10609 	return (mhp);
10610 }
10611 
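/*
 * Channel server scanning helpers.  The receive scanset packs the
 * registered domain IDs into 4-bit nibbles so that CHANSVR_NEXT_DOMID
 * can cycle through them with a shift and mask.  CHANSVR_SYNC_CACHE
 * picks up any pending scanset/domset changes and refreshes the cached
 * per-domain receive mailbox pointers.
 */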
10612 #define	CHANSVR_SYNC_CACHE(csp, mmp, chan) \
10613 { \
10614 	ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp)); \
10615 	if ((csp)->ch_recv_changed) { \
10616 		register int _d; \
10617 		(csp)->ch_recv_scanset = (csp)->ch_recv_scanset_pending; \
10618 		(csp)->ch_recv_domset = (csp)->ch_recv_domset_pending; \
10619 		for (_d = 0; _d < MAX_DOMAINS; _d++) { \
10620 			if (DOMAIN_IN_SET((csp)->ch_recv_domset, _d)) { \
10621 				(mmp)[_d] = \
10622 					&idn_domain[_d].dmbox.m_recv[chan]; \
10623 			} else { \
10624 				(mmp)[_d] = NULL; \
10625 			} \
10626 		} \
10627 		(csp)->ch_recv_changed = 0; \
10628 	} \
10629 }
10630 #define	CHANSVR_NEXT_DOMID(csp, i, d) \
10631 { \
10632 	(i) = ((i) + 1) & (MAX_DOMAINS - 1); \
10633 	(d) = (int)(((csp)->ch_recv_scanset >> ((i) << 2)) & 0xf); \
10634 }
10635 #define	CHANSVR_RESET_INDEX(i)	((i) = -1)
10636 
10637 #ifdef DEBUG
10638 static idn_mainmbox_t	*Mmp[IDN_MAXMAX_NETS][MAX_DOMAINS];
10639 #endif /* DEBUG */
10640 
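/*
 * Per-channel receive server thread.  Cycles through the domains
 * registered with this channel (via the cached scanset), pulling
 * packets out of their SMR receive mailboxes and handing them to
 * idn_recv_mboxdata().  Busy-waits briefly when idle, then backs off
 * to a timed cv_wait, and exits through the morgue semaphore once the
 * channel is detached.
 */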
10641 static void
10642 idn_chan_server(idn_chansvr_t **cspp)
10643 {
10644 	idn_mboxhdr_t	*mainhp;
10645 	register idn_chansvr_t		*csp;
10646 	register idn_mboxmsg_t		*mqp;
10647 #ifdef DEBUG
10648 	idn_mainmbox_t			**mmp;
10649 #else
10650 	idn_mainmbox_t			*mmp[MAX_DOMAINS];
10651 #endif /* DEBUG */
10652 	register int	qi;
10653 	struct idn	*sip;
10654 	int		channel;
10655 	int		cpuid;
10656 	int		empty;
10657 	int		tot_pktcount, tot_dropcount;
10658 	register int	index;
10659 	register int	domid;
10660 	register int	idleloops;
10661 	procname_t	proc = "idn_chan_server";
10662 
10663 
10664 #ifdef DEBUG
10665 	mmp = &Mmp[(*cspp)->ch_id][0];
10666 	bzero(mmp, MAX_DOMAINS * sizeof (idn_mainmbox_t *));
10667 #else /* DEBUG */
10668 	bzero(mmp, sizeof (mmp));
10669 #endif /* DEBUG */
10670 
10671 	tot_pktcount = tot_dropcount = 0;
10672 
10673 	ASSERT(cspp && *cspp);
10674 
10675 	csp = *cspp;
10676 	channel = csp->ch_id;
10677 	sip = IDN_INST2SIP(channel);
10678 	ASSERT(sip);
10679 
10680 	PR_CHAN("%s: CHANNEL SERVER (channel %d) GOING ACTIVE...\n",
10681 		proc, channel);
10682 
10683 	IDN_CHAN_LOCK_RECV(csp);
10684 	IDN_CHAN_RECV_INPROGRESS(csp);
10685 	ASSERT(csp->ch_recv_threadp == curthread);
10686 	mutex_enter(&cpu_lock);
10687 	if ((cpuid = csp->ch_bound_cpuid_pending) != -1) {
10688 		cpu_t	*cp = cpu_get(cpuid);
10689 		/*
10690 		 * We've been requested to bind to
10691 		 * a particular cpu.
10692 		 */
10693 		if ((cp == NULL) || !cpu_is_online(cp)) {
10694 			/*
10695 			 * Cpu seems to have gone away or gone offline
10696 			 * since originally requested.
10697 			 */
10698 			mutex_exit(&cpu_lock);
10699 			cmn_err(CE_WARN,
10700 				"IDN: 239: invalid CPU ID (%d) specified for "
10701 				"IDN net %d",
10702 				cpuid, channel);
10703 		} else {
10704 			csp->ch_bound_cpuid = cpuid;
10705 			affinity_set(csp->ch_bound_cpuid);
10706 			mutex_exit(&cpu_lock);
10707 		}
10708 		csp->ch_bound_cpuid_pending = -1;
10709 	} else {
10710 		mutex_exit(&cpu_lock);
10711 	}
10712 	if (csp->ch_bound_cpuid != -1) {
10713 		PR_CHAN("%s: thread bound to cpuid %d\n",
10714 			proc, csp->ch_bound_cpuid);
10715 	}
10716 	/*
10717 	 * Only the first (main) mbox header is used for
10718 	 * synchronization with data delivery since there is
10719 	 * only one data server for all mailboxes for this
10720 	 * given channel.
10721 	 */
10722 	CHANSVR_SYNC_CACHE(csp, mmp, channel);
10723 
10724 	mainhp = ((csp->ch_recv_domcount > 0) &&
10725 		    IDN_CHANNEL_IS_RECV_ACTIVE(csp))
10726 			? idn_chan_server_syncheader(channel) : NULL;
10727 
10728 	if (mainhp && IDN_CHANNEL_IS_RECV_ACTIVE(csp))
10729 		mainhp->mh_svr_active = 1;
10730 
10731 	ASSERT(csp->ch_recv_domcount ?
10732 		(csp->ch_recv_scanset && csp->ch_recv_domset) : 1);
10733 
10734 	IDN_CHAN_UNLOCK_RECV(csp);
10735 
10736 	empty = 0;
10737 	idleloops = 0;
10738 	CHANSVR_RESET_INDEX(index);
10739 
10740 	/*
10741 	 * ---------------------------------------------
10742 	 */
10743 	/*CONSTCOND*/
10744 	while (1) {
10745 		register int	pktcount;
10746 		register int	dropcount;
10747 		ushort_t		mbox_csum;
10748 		idn_mboxtbl_t	*smr_mboxp;	/* points to SMR space */
10749 		register smr_offset_t	bufoffset;
10750 #ifdef DEBUG
10751 		register smr_pkthdr_t	*hdrp;
10752 		idn_netaddr_t		netaddr;
10753 #endif /* DEBUG */
10754 
10755 		/*
10756 		 * Speed through and find the next available domid.
10757 		 */
10758 		CHANSVR_NEXT_DOMID(csp, index, domid);
10759 
10760 		if (!index) {
10761 			/*
10762 			 * We only check state changes when
10763 			 * we wrap around.  Done for performance.
10764 			 */
10765 			if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) ||
10766 					csp->ch_recv.c_checkin ||
10767 					(idn.state != IDNGS_ONLINE)) {
10768 
10769 				PR_DATA("%s: (channel %d) %s\n",
10770 					proc, channel,
10771 					IDN_CHANNEL_IS_DETACHED(csp)
10772 					? "DEAD" :
10773 					IDN_CHANNEL_IS_PENDING(csp)
10774 					? "IDLED" :
10775 					IDN_CHANNEL_IS_ACTIVE(csp)
10776 					? "ACTIVE" : "DISABLED");
10777 				goto cc_sleep;
10778 			}
10779 		}
10780 		if (csp->ch_recv.c_checkin)
10781 			goto cc_sleep;
10782 
10783 		if (empty == csp->ch_recv_domcount) {
10784 			empty = 0;
10785 			goto cc_slowdown;
10786 		}
10787 
10788 		ASSERT(mmp[domid] != NULL);
10789 
10790 		mutex_enter(&mmp[domid]->mm_mutex);
10791 		if ((smr_mboxp = mmp[domid]->mm_smr_mboxp) == NULL) {
10792 			/*
10793 			 * Somebody is trying to shut things down.
10794 			 */
10795 			empty++;
10796 			mutex_exit(&mmp[domid]->mm_mutex);
10797 			continue;
10798 		}
10799 		ASSERT(mmp[domid]->mm_channel == (short)channel);
10800 		/*
10801 		 * We don't care if the mm_smr_mboxp is nullified
10802 		 * after this point.  The thread attempting to shut
10803 		 * us down has to formally pause this channel before
10804 		 * anything is official anyway.  So, we can continue
10805 		 * with our local SMR reference until the thread
10806 		 * shutting us down really stops us.
10807 		 *
10808 		 * Need to get the qiget index _before_ we drop the
10809 		 * lock since it might get flushed (idn_mainmbox_flush)
10810 		 * once we drop the mm_mutex.
10811 		 *
10812 		 * We prefer not to hold the mm_mutex across the
10813 		 * idn_recv_mboxdata() call since that may be time-
10814 		 * consuming.
10815 		 */
10816 		qi  = mmp[domid]->mm_qiget;
10817 
10818 		/*
10819 		 * Check the mailbox header if checksum is turned on.
10820 		 */
10821 		mbox_csum = IDN_CKSUM_MBOX(&smr_mboxp->mt_header);
10822 		if (!VALID_MBOXHDR(&smr_mboxp->mt_header, channel, mbox_csum)) {
10823 			IDN_KSTAT_INC(sip, si_mboxcrc);
10824 			IDN_KSTAT_INC(sip, si_ierrors);
10825 			if (!(mmp[domid]->mm_flags & IDNMMBOX_FLAG_CORRUPTED)) {
10826 				cmn_err(CE_WARN,
10827 					"IDN: 241: [recv] (domain %d, "
10828 					"channel %d) SMR CORRUPTED - RELINK",
10829 					domid, channel);
10830 				mmp[domid]->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
10831 			}
10832 			empty = 0;
10833 			mutex_exit(&mmp[domid]->mm_mutex);
10834 			goto cc_sleep;
10835 		}
10836 		mutex_exit(&mmp[domid]->mm_mutex);
10837 		mqp = &smr_mboxp->mt_queue[0];
10838 
10839 		pktcount = dropcount = 0;
10840 
10841 		if (mqp[qi].ms_owner == 0)
10842 			goto cc_next;
10843 
10844 		bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
10845 
10846 		if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
10847 			/* ASSERT(0); */
10848 			mqp[qi].ms_flag |= IDN_MBOXMSG_FLAG_ERR_BADOFFSET;
10849 			mqp[qi].ms_owner = 0;
10850 			IDN_MMBOXINDEX_INC(qi);
10851 			dropcount++;
10852 
10853 			IDN_KSTAT_INC(sip, si_smraddr);
10854 			IDN_KSTAT_INC(sip, si_ierrors);
10855 
10856 		} else {
10857 			PR_DATA("%s: (channel %d) pkt (off 0x%x, "
10858 				"qiget %d) from domain %d\n",
10859 				proc, channel, bufoffset, qi, domid);
10860 #ifdef DEBUG
10861 
10862 			hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(bufoffset));
10863 			netaddr.netaddr = hdrp->b_netaddr;
10864 			ASSERT(netaddr.net.chan == (ushort_t)channel);
10865 #endif /* DEBUG */
10866 
10867 			if (idn_recv_mboxdata(channel,
10868 					IDN_OFFSET2ADDR(bufoffset)) < 0) {
10869 				mutex_enter(&mmp[domid]->mm_mutex);
10870 				if (!(mmp[domid]->mm_flags &
10871 					IDNMMBOX_FLAG_CORRUPTED)) {
10872 					cmn_err(CE_WARN,
10873 						"IDN: 241: [recv] (domain "
10874 						"%d, channel %d) SMR "
10875 						"CORRUPTED - RELINK",
10876 						domid, channel);
10877 					mmp[domid]->mm_flags |=
10878 						IDNMMBOX_FLAG_CORRUPTED;
10879 				}
10880 				mutex_exit(&mmp[domid]->mm_mutex);
10881 			}
10882 
10883 			mqp[qi].ms_owner = 0;
10884 			IDN_MMBOXINDEX_INC(qi);
10885 			pktcount++;
10886 		}
10887 
10888 cc_next:
10889 
10890 		mutex_enter(&mmp[domid]->mm_mutex);
10891 		if (mmp[domid]->mm_smr_mboxp) {
10892 			if (dropcount)
10893 				mmp[domid]->mm_dropped += dropcount;
10894 			mmp[domid]->mm_qiget = qi;
10895 			mmp[domid]->mm_count += pktcount;
10896 		}
10897 		mutex_exit(&mmp[domid]->mm_mutex);
10898 
10899 		if (pktcount == 0) {
10900 			empty++;
10901 		} else {
10902 			csp->ch_recv_waittime = IDN_NETSVR_WAIT_MIN;
10903 			empty = 0;
10904 			idleloops = 0;
10905 
10906 			PR_DATA("%s: (channel %d) dom=%d, pktcnt=%d\n",
10907 				proc, channel, domid, pktcount);
10908 		}
10909 
10910 		continue;
10911 
10912 cc_slowdown:
10913 
10914 #ifdef DEBUG
10915 		if (idleloops == 0) {
10916 			PR_DATA("%s: (channel %d) going SOFT IDLE...\n",
10917 				proc, channel);
10918 		}
10919 #endif /* DEBUG */
10920 		if (idleloops++ < IDN_NETSVR_SPIN_COUNT) {
10921 			/*
10922 			 * At this level we only busy-wait.
10923 			 * Get back into action.
10924 			 */
10925 			continue;
10926 		}
10927 		idleloops = 0;
10928 
10929 cc_sleep:
10930 
10931 		if (mainhp)
10932 			mainhp->mh_svr_active = 0;
10933 
10934 		IDN_CHAN_LOCK_RECV(csp);
10935 
10936 cc_die:
10937 
10938 		ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp));
10939 
10940 		if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
10941 					IDN_CHANNEL_IS_DETACHED(csp)) {
10942 			/*
10943 			 * Time to die...
10944 			 */
10945 			PR_CHAN("%s: (channel %d) serviced %d "
10946 				"packets, drop = %d\n", proc, channel,
10947 				tot_pktcount, tot_dropcount);
10948 			PR_CHAN("%s: (channel %d) TERMINATING\n",
10949 				proc, channel);
10950 			PR_CHAN("%s: (channel %d) ch_morguep = %p\n",
10951 				proc, channel, csp->ch_recv_morguep);
10952 
10953 			csp->ch_recv_threadp = NULL;
10954 #ifdef DEBUG
10955 			for (index = 0; index < csp->ch_recv_domcount;
10956 							index++) {
10957 				if ((int)((csp->ch_recv_scanset >>
10958 							(index*4)) & 0xf)
10959 							== domid) {
10960 					PR_DATA("%s: WARNING (channel %d) "
10961 						"DROPPING domid %d...\n",
10962 						proc, channel, domid);
10963 				}
10964 			}
10965 #endif /* DEBUG */
10966 			IDN_CHAN_RECV_DONE(csp);
10967 
10968 			sema_v(csp->ch_recv_morguep);
10969 
10970 			IDN_CHAN_UNLOCK_RECV(csp);
10971 
10972 			thread_exit();
10973 			/* not reached */
10974 		}
10975 
10976 		do {
10977 			if (IDN_CHANNEL_IS_DETACHED(csp)) {
10978 				PR_CHAN("%s: (channel %d) going to DIE...\n",
10979 					proc, channel);
10980 				goto cc_die;
10981 			}
10982 #ifdef DEBUG
10983 			if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
10984 					(csp->ch_recv_waittime <=
10985 						IDN_NETSVR_WAIT_MAX)) {
10986 				PR_CHAN("%s: (channel %d) going SOFT IDLE "
10987 					"(waittime = %d ticks)...\n",
10988 					proc, channel,
10989 					csp->ch_recv_waittime);
10990 			} else {
10991 				PR_CHAN("%s: (channel %d) going "
10992 					"HARD IDLE...\n", proc, channel);
10993 			}
10994 #endif /* DEBUG */
10995 			IDN_CHAN_RECV_DONE(csp);
10996 
10997 			/*
10998 			 * If we're being asked to check in then
10999 			 * go into a hard sleep.  Want to give the
11000 			 * thread requesting the check-in a chance.
11001 			 */
11002 			while (csp->ch_recv.c_checkin)
11003 				cv_wait(&csp->ch_recv_cv,
11004 					&csp->ch_recv.c_mutex);
11005 
11006 			if (csp->ch_recv_waittime > IDN_NETSVR_WAIT_MAX)
11007 				cv_wait(&csp->ch_recv_cv,
11008 					&csp->ch_recv.c_mutex);
11009 			else
11010 				(void) cv_timedwait(&csp->ch_recv_cv,
11011 						&csp->ch_recv.c_mutex,
11012 						lbolt +
11013 						csp->ch_recv_waittime);
11014 
11015 			IDN_CHAN_RECV_INPROGRESS(csp);
11016 
11017 			IDN_KSTAT_INC(sip, si_sigsvr);
11018 
11019 			if (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)
11020 				csp->ch_recv_waittime <<=
11021 						IDN_NETSVR_WAIT_SHIFT;
11022 
11023 		} while (!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
11024 
11025 		/*
11026 		 * Before we see the world (and touch SMR space),
11027 		 * see if we've been told to die.
11028 		 */
11029 		mainhp = NULL;
11030 		/*
11031 		 * The world may have changed since we were
11032 		 * asleep.  Need to resync cache and check for a
11033 		 * new syncheader.
11034 		 *
11035 		 * Reset chansvr cache against any changes in
11036 		 * mbox fields we need (mm_qiget).
11037 		 */
11038 		CHANSVR_SYNC_CACHE(csp, mmp, channel);
11039 		if (csp->ch_recv_domcount <= 0) {
11040 			/*
11041 			 * Everybody disappeared on us.
11042 			 * Go back to sleep.
11043 			 */
11044 			goto cc_die;
11045 		}
11046 		ASSERT(csp->ch_recv_scanset && csp->ch_recv_domset);
11047 
11048 		mainhp = idn_chan_server_syncheader(channel);
11049 		if (mainhp == NULL) {
11050 			/*
11051 			 * Bummer...we're idling...
11052 			 */
11053 			goto cc_die;
11054 		}
11055 
11056 		mainhp->mh_svr_active = 1;
11057 
11058 		IDN_CHAN_UNLOCK_RECV(csp);
11059 		/*
11060 		 * Reset the domid index after sleeping.
11061 		 */
11062 		CHANSVR_RESET_INDEX(index);
11063 
11064 		empty = 0;
11065 		idleloops = 0;
11066 	}
11067 }
11068 
11069 #if 0
11070 /*
11071  * We maintain a separate function for flushing the STREAMs
11072  * queue of a channel because it must be done outside the
11073  * context of the idn_chan_action routine.  The streams flush
11074  * cannot occur inline with the idn_chan_action because
11075  * the act of flushing may cause IDN send functions to be called
11076  * directly and thus locks to be obtained which could result
11077  * in deadlocks.
11078  */
11079 static void
11080 idn_chan_flush(idn_chansvr_t *csp)
11081 {
11082 	queue_t		*rq;
11083 	struct idn	*sip;
11084 	int		flush_type = 0;
11085 	idn_chaninfo_t	*csend, *crecv;
11086 	procname_t	proc = "idn_chan_flush";
11087 
11088 	csend = &csp->ch_send;
11089 	crecv = &csp->ch_recv;
11090 
11091 	mutex_enter(&crecv->c_mutex);
11092 	mutex_enter(&csend->c_mutex);
11093 
11094 	if (crecv->c_state & IDN_CHANSVC_STATE_FLUSH)
11095 		flush_type |= FLUSHR;
11096 
11097 	if (csend->c_state & IDN_CHANSVC_STATE_FLUSH)
11098 		flush_type |= FLUSHW;
11099 
11100 	if (flush_type) {
11101 		rq = NULL;
11102 		rw_enter(&idn.struprwlock, RW_READER);
11103 		if ((sip = IDN_INST2SIP(csp->ch_id)) != NULL)
11104 			rq = sip->si_ipq;
11105 		rw_exit(&idn.struprwlock);
11106 		if (rq) {
11107 			/*
11108 			 * Flush the STREAM if possible
11109 			 * to get the channel server coherent
11110 			 * enough to respond to us.
11111 			 */
11112 			PR_CHAN("%s: sending FLUSH (%x) to channel %d\n",
11113 				proc, flush_type, csp->ch_id);
11114 
11115 			(void) putnextctl1(rq, M_FLUSH, flush_type);
11116 		}
11117 		crecv->c_state &= ~IDN_CHANSVC_STATE_FLUSH;
11118 		csend->c_state &= ~IDN_CHANSVC_STATE_FLUSH;
11119 
11120 		if (crecv->c_waiters)
11121 			cv_broadcast(&crecv->c_cv);
11122 	}
11123 
11124 	mutex_exit(&csend->c_mutex);
11125 	mutex_exit(&crecv->c_mutex);
11126 }
11127 #endif /* 0 */
11128 
11129 /*
11130  * Locks are with respect to SEND/RECV locks (c_mutex).
11131  *
11132  * STOP/SUSPEND/DETACH
11133  *	- Entered with locks dropped, leave with locks held.
11134  *	  DETACH - lock dropped manually.
11135  * RESTART/RESUME
11136  *	- Entered with locks held, leave with locks dropped.
11137  * ATTACH
11138  *	- both enter and leave with locks dropped.
11139  */
11140 static void
11141 idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
11142 {
11143 	uchar_t		clr_state, set_state;
11144 	uint_t		is_running;
11145 	domainset_t	closed_slabwaiters = 0;
11146 	struct idn	*sip;
11147 	idn_chansvr_t	*csp;
11148 	idn_chaninfo_t	*csend, *crecv;
11149 	procname_t	proc = "idn_chan_action";
11150 
11151 	ASSERT((channel >= 0) && (channel < IDN_MAX_NETS));
11152 	ASSERT(idn.chan_servers);
11153 
11154 	csp = &idn.chan_servers[channel];
11155 
11156 	PR_CHAN("%s: requesting %s for channel %d\n",
11157 		proc, chanaction_str[(int)chanaction], channel);
11158 
11159 	csend = &csp->ch_send;
11160 	crecv = &csp->ch_recv;
11161 
11162 	ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
11163 
11164 	clr_state = set_state = 0;
11165 
11166 	switch (chanaction) {
11167 	case IDNCHAN_ACTION_DETACH:
11168 		clr_state = IDN_CHANSVC_STATE_MASK;
11169 		/*FALLTHROUGH*/
11170 
11171 	case IDNCHAN_ACTION_STOP:
11172 		clr_state |= IDN_CHANSVC_STATE_ENABLED;
11173 		/*FALLTHROUGH*/
11174 
11175 	case IDNCHAN_ACTION_SUSPEND:
11176 		clr_state |= IDN_CHANSVC_STATE_ACTIVE;
11177 
11178 		/*
11179 		 * Must maintain this locking order.
11180 		 * Set asynchronous check-in flags.
11181 		 */
11182 		crecv->c_checkin = 1;
11183 		csend->c_checkin = 1;
11184 
11185 		is_running = 0;
11186 		if ((csend->c_inprogress || crecv->c_inprogress) &&
11187 			wait && (csp->ch_recv_threadp != curthread)) {
11188 
11189 			rw_enter(&idn.struprwlock, RW_READER);
11190 			if ((sip = IDN_INST2SIP(channel)) != NULL) {
11191 				/*
11192 				 * Temporarily turn off the STREAM
11193 				 * to give a chance to breath.
11194 				 */
11195 				is_running = sip->si_flags & IDNRUNNING;
11196 				if (is_running)
11197 					sip->si_flags &= ~IDNRUNNING;
11198 			}
11199 			rw_exit(&idn.struprwlock);
11200 		}
11201 
11202 		mutex_enter(&crecv->c_mutex);
11203 		crecv->c_state &= ~clr_state;
11204 
11205 		mutex_enter(&csend->c_mutex);
11206 		csend->c_state &= ~clr_state;
11207 
11208 		/*
11209 		 * It's possible the channel server could come
11210 		 * through this flow itself due to putting data upstream
11211 		 * that ultimately turned around and came back down for
11212 		 * sending.  If this is the case we certainly don't
11213 		 * want to cv_wait, otherwise we'll obviously deadlock
11214 		 * waiting for ourself.  So, only block if somebody
11215 		 * waiting for ourself.  So, only block if the caller
11216 		 * is somebody other than the channel server we're
11217 		 * attempting to suspend/stop.
11218 		if (wait && (csp->ch_recv_threadp != curthread)) {
11219 			int	do_flush = 0;
11220 
11221 			if (csend->c_inprogress || crecv->c_inprogress)
11222 				do_flush++;
11223 
11224 			if (do_flush) {
11225 				rw_enter(&idn.struprwlock, RW_READER);
11226 				if ((sip = IDN_INST2SIP(channel)) != NULL) {
11227 					/*
11228 					 * Temporarily turn off the STREAM
11229 					 * to give a chance to breath.
11230 					 */
11231 					if (sip->si_flags & IDNRUNNING) {
11232 						is_running = 1;
11233 						sip->si_flags &= ~IDNRUNNING;
11234 					}
11235 				}
11236 				rw_exit(&idn.struprwlock);
11237 			}
11238 
11239 			/*
11240 			 * If we have any senders in-progress
11241 			 * it's possible they're stuck waiting
11242 			 * down in smr_buf_alloc which may never
11243 			 * arrive if we're in an unlink process.
11244 			 * Rather than wait for it to timeout
11245 			 * let's be proactive so we can disconnect
11246 			 * asap.
11247 			 */
11248 			closed_slabwaiters = csp->ch_reg_domset;
11249 			DOMAINSET_ADD(closed_slabwaiters, idn.localid);
11250 			if (closed_slabwaiters)
11251 				smr_slabwaiter_close(closed_slabwaiters);
11252 
11253 			do {
11254 				/*
11255 				 * It's possible due to a STREAMs
11256 				 * loopback from read queue to write queue
11257 				 * that receiver and sender may be the same
11258 				 * thread, i.e. receiver's inprogress
11259 				 * flag will never clear until sender's
11260 				 * inprogress flag clears.  So, we wait
11261 				 * for sender's inprogress first.
11262 				 */
11263 				while (csend->c_inprogress) {
11264 					mutex_exit(&crecv->c_mutex);
11265 					while (csend->c_inprogress) {
11266 						csend->c_waiters++;
11267 						cv_wait(&csend->c_cv,
11268 							&csend->c_mutex);
11269 						csend->c_waiters--;
11270 					}
11271 					/*
11272 					 * Maintain lock ordering.
11273 					 * Eventually we will catch
11274 					 * him due to the flag settings.
11275 					 */
11276 					mutex_exit(&csend->c_mutex);
11277 					mutex_enter(&crecv->c_mutex);
11278 					mutex_enter(&csend->c_mutex);
11279 				}
11280 				if (crecv->c_inprogress) {
11281 					mutex_exit(&csend->c_mutex);
11282 					while (crecv->c_inprogress) {
11283 						crecv->c_waiters++;
11284 						cv_wait(&crecv->c_cv,
11285 							&crecv->c_mutex);
11286 						crecv->c_waiters--;
11287 					}
11288 					mutex_enter(&csend->c_mutex);
11289 				}
11290 			} while (csend->c_inprogress);
11291 		}
11292 
11293 		if (is_running) {
11294 			/*
11295 			 * Restore the IDNRUNNING bit in
11296 			 * the flags to let them know the
11297 			 * channel is still alive.
11298 			 */
11299 			rw_enter(&idn.struprwlock, RW_READER);
11300 			if ((sip = IDN_INST2SIP(channel)) != NULL)
11301 				sip->si_flags |= IDNRUNNING;
11302 			rw_exit(&idn.struprwlock);
11303 		}
11304 
11305 		if (closed_slabwaiters) {
11306 			/*
11307 			 * We can reopen now since at this point no new
11308 			 * slabwaiters will attempt to come in and wait.
11309 			 */
11310 			smr_slabwaiter_open(csp->ch_reg_domset);
11311 		}
11312 
11313 		crecv->c_checkin = 0;
11314 		csend->c_checkin = 0;
11315 
11316 		/*
11317 		 * ALL leave with locks held.
11318 		 */
11319 		PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
11320 			proc, chanaction_str[(int)chanaction], channel);
11321 		break;
11322 
11323 	case IDNCHAN_ACTION_ATTACH:
11324 		mutex_enter(&crecv->c_mutex);
11325 		mutex_enter(&csend->c_mutex);
11326 		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ATTACHED;
11327 		/*FALLTHROUGH*/
11328 
11329 	case IDNCHAN_ACTION_RESTART:
11330 		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ENABLED;
11331 		/*FALLTHROUGH*/
11332 
11333 	case IDNCHAN_ACTION_RESUME:
11334 		ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
11335 		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ACTIVE;
11336 
11337 		crecv->c_state |= set_state;
11338 		csend->c_state |= set_state;
11339 
11340 		/*
11341 		 * The channel server itself could come through this
11342 		 * flow, so obviously no point in attempting to wake
11343 		 * ourselves up!
11344 		 */
11345 		if (csp->ch_recv_threadp &&
11346 				(csp->ch_recv_threadp != curthread))
11347 			cv_signal(&csp->ch_recv_cv);
11348 
11349 		PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
11350 			proc, chanaction_str[(int)chanaction], channel);
11351 
11352 		/*
11353 		 * Leaves with locks released.
11354 		 */
11355 		mutex_exit(&csend->c_mutex);
11356 		mutex_exit(&crecv->c_mutex);
11357 		break;
11358 
11359 	default:
11360 		ASSERT(0);
11361 		break;
11362 	}
11363 }
11364 
11365 static void
11366 idn_chan_addmbox(int channel, ushort_t domset)
11367 {
11368 	idn_chansvr_t	*csp;
11369 	register int	d;
11370 	procname_t	proc = "idn_chan_addmbox";
11371 
11372 
11373 	PR_CHAN("%s: adding domset 0x%x main mailboxes to channel %d\n",
11374 		proc, domset, channel);
11375 
11376 	ASSERT(idn.chan_servers);
11377 
11378 	csp = &idn.chan_servers[channel];
11379 
11380 	/*
11381 	 * Adding domains to a channel can be
11382 	 * asynchronous, so we don't bother waiting.
11383 	 */
11384 	IDN_CHANNEL_SUSPEND(channel, 0);
11385 
11386 	/*
11387 	 * Now we have the sending and receiving sides blocked
11388 	 * for this channel.
11389 	 */
11390 	for (d = 0; d < MAX_DOMAINS; d++) {
11391 		if (!DOMAIN_IN_SET(domset, d))
11392 			continue;
11393 		if (IDN_CHAN_DOMAIN_IS_REGISTERED(csp, d)) {
11394 			DOMAINSET_DEL(domset, d);
11395 			continue;
11396 		}
11397 		IDN_CHANSVR_SCANSET_ADD_PENDING(csp, d);
11398 		DOMAINSET_ADD(csp->ch_recv_domset_pending, d);
11399 		IDN_CHAN_DOMAIN_REGISTER(csp, d);
11400 
11401 		PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
11402 			"scanset = 0x%lx\n", proc, d, channel,
11403 			csp->ch_recv_scanset_pending);
11404 		PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
11405 			proc, d, channel, (uint_t)csp->ch_reg_domset);
11406 
11407 		CHECKPOINT_OPENED(IDNSB_CHKPT_CHAN,
11408 					idn_domain[d].dhw.dh_boardset, 1);
11409 	}
11410 	if (domset)
11411 		csp->ch_recv_changed = 1;
11412 
11413 	IDN_CHANNEL_RESUME(channel);
11414 }
11415 
11416 static void
11417 idn_chan_delmbox(int channel, ushort_t domset)
11418 {
11419 	idn_chansvr_t	*csp;
11420 	register int	d;
11421 	procname_t	proc = "idn_chan_delmbox";
11422 
11423 
11424 	PR_CHAN("%s: deleting domset 0x%x main mailboxes from channel %d\n",
11425 		proc, domset, channel);
11426 
11427 	ASSERT(idn.chan_servers);
11428 
11429 	csp = &idn.chan_servers[channel];
11430 
11431 	/*
11432 	 * Here we have to wait for the channel server
11433 	 * as it's vital that we don't return without guaranteeing
11434 	 * that the given domset is no longer registered.
11435 	 */
11436 	IDN_CHANNEL_SUSPEND(channel, 1);
11437 
11438 	/*
11439 	 * Now we have the sending and receiving sides blocked
11440 	 * for this channel.
11441 	 */
11442 	for (d = 0; d < MAX_DOMAINS; d++) {
11443 		if (!DOMAIN_IN_SET(domset, d))
11444 			continue;
11445 		if (!IDN_CHAN_DOMAIN_IS_REGISTERED(csp, d)) {
11446 			DOMAINSET_DEL(domset, d);
11447 			continue;
11448 		}
11449 		/*
11450 		 * This domain has a mailbox hanging on this channel.
11451 		 * Get him out.
11452 		 *
11453 		 * First remove him from the receive side.
11454 		 */
11455 		ASSERT(csp->ch_recv_domcount > 0);
11456 		IDN_CHANSVR_SCANSET_DEL_PENDING(csp, d);
11457 		DOMAINSET_DEL(csp->ch_recv_domset_pending, d);
11458 		IDN_CHAN_DOMAIN_UNREGISTER(csp, d);
11459 
11460 		PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
11461 			"scanset = 0x%lx\n", proc, d, channel,
11462 			csp->ch_recv_scanset_pending);
11463 		PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
11464 			proc, d, channel, (uint_t)csp->ch_reg_domset);
11465 
11466 		CHECKPOINT_CLOSED(IDNSB_CHKPT_CHAN,
11467 					idn_domain[d].dhw.dh_boardset, 2);
11468 
11469 	}
11470 	if (domset)
11471 		csp->ch_recv_changed = 1;
11472 
11473 	IDN_CHANNEL_RESUME(channel);
11474 }
11475 
11476 static int
11477 idn_valid_etherheader(struct ether_header *ehp)
11478 {
11479 	uchar_t	*eap;
11480 
11481 	eap = &ehp->ether_dhost.ether_addr_octet[0];
11482 
11483 	if ((eap[IDNETHER_ZERO] != 0) && (eap[IDNETHER_ZERO] != 0xff))
11484 		return (0);
11485 
11486 	if ((eap[IDNETHER_COOKIE1] != IDNETHER_COOKIE1_VAL) &&
11487 		(eap[IDNETHER_COOKIE1] != 0xff))
11488 		return (0);
11489 
11490 	if ((eap[IDNETHER_COOKIE2] != IDNETHER_COOKIE2_VAL) &&
11491 		(eap[IDNETHER_COOKIE2] != 0xff))
11492 		return (0);
11493 
11494 	if ((eap[IDNETHER_RESERVED] != IDNETHER_RESERVED_VAL) &&
11495 		(eap[IDNETHER_RESERVED] != 0xff))
11496 		return (0);
11497 
11498 	if (!VALID_UCHANNEL(eap[IDNETHER_CHANNEL]) &&
11499 		(eap[IDNETHER_CHANNEL] != 0xff))
11500 		return (0);
11501 
11502 	if (!VALID_UDOMAINID(IDN_NETID2DOMID(eap[IDNETHER_NETID])) &&
11503 		(eap[IDNETHER_NETID] != 0xff))
11504 		return (0);
11505 
11506 	return (1);
11507 }
11508 
11509 /*
11510  * Packet header has already been filled in.
11511  * RETURNS:	0
11512  *		ENOLINK
11513  *		EPROTO
11514  *		ENOSPC
11515  */
11516 /*ARGSUSED*/
11517 static int
11518 idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
11519 {
11520 	idn_mainmbox_t	*mmp;
11521 	idn_mboxmsg_t	*mqp;
11522 	smr_pkthdr_t	*hdrp;
11523 	smr_offset_t	bufoffset;
11524 	idn_netaddr_t	dst;
11525 	ushort_t		mbox_csum;
11526 	int		rv = 0;
11527 	int		pktlen, qi;
11528 	procname_t	proc = "idn_send_mboxdata";
11529 
11530 	mmp = idn_domain[domid].dmbox.m_send;
11531 	if (mmp == NULL) {
11532 		PR_DATA("%s: dmbox.m_send == NULL\n", proc);
11533 		IDN_KSTAT_INC(sip, si_linkdown);
11534 		return (ENOLINK);
11535 	}
11536 
11537 	mmp += channel;
11538 	mutex_enter(&mmp->mm_mutex);
11539 
11540 	if (mmp->mm_smr_mboxp == NULL) {
11541 		PR_DATA("%s: (d %d, chn %d) mm_smr_mboxp == NULL\n",
11542 			proc, domid, channel);
11543 		IDN_KSTAT_INC(sip, si_linkdown);
11544 		rv = ENOLINK;
11545 		goto send_err;
11546 	}
11547 	mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
11548 	if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
11549 		PR_DATA("%s: (d %d, chn %d) mbox hdr cksum (%d) "
11550 			"!= actual (%d)\n",
11551 			proc, domid, channel, mbox_csum,
11552 			mmp->mm_smr_mboxp->mt_header.mh_cksum);
11553 		if ((mmp->mm_flags & IDNMMBOX_FLAG_CORRUPTED) == 0) {
11554 			cmn_err(CE_WARN,
11555 				"IDN: 241: [send] (domain %d, "
11556 				"channel %d) SMR CORRUPTED - RELINK",
11557 				domid, channel);
11558 			mmp->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
11559 		}
11560 		IDN_KSTAT_INC(sip, si_mboxcrc);
11561 		IDN_KSTAT_INC(sip, si_oerrors);
11562 		rv = EPROTO;
11563 		goto send_err;
11564 	}
11565 
11566 	bufoffset = IDN_ADDR2OFFSET(bufp);
11567 	hdrp	  = IDN_BUF2HDR(bufp);
11568 	pktlen    = hdrp->b_length;
11569 	dst.netaddr = hdrp->b_netaddr;
11570 	ASSERT(dst.net.chan == (ushort_t)channel);
11571 
11572 	mqp = &mmp->mm_smr_mboxp->mt_queue[0];
11573 	qi  = mmp->mm_qiput;
11574 
11575 	if (mqp[qi].ms_owner) {
11576 		PR_DATA("%s: mailbox FULL (qiput=%d, qiget=%d)\n",
11577 			proc, mmp->mm_qiput, mmp->mm_qiget);
11578 		IDN_KSTAT_INC(sip, si_txfull);
11579 		rv = ENOSPC;
11580 		goto send_err;
11581 	}
11582 	if (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) {
11583 		smr_offset_t	recl_bufoffset;
11584 		/*
11585 		 * The remote domain has finished with this mailbox
11586 		 * entry, but it has not been reclaimed yet.  A reclaim
11587 		 * was done before coming into this routine; however,
11588 		 * timing may have been such that the entry became
11589 		 * free just after that reclamation and before we
11590 		 * got here.  Go ahead and reclaim this entry now.
11591 		 */
11592 		recl_bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
11593 
11594 		PR_DATA("%s: attempting reclaim (domain %d) "
11595 			"(qiput=%d, b_off=0x%x)\n",
11596 			proc, domid, qi, recl_bufoffset);
11597 
11598 		if (VALID_NWROFFSET(recl_bufoffset, IDN_SMR_BUFSIZE)) {
11599 			int		recl;
11600 			caddr_t		b_bufp;
11601 			smr_pkthdr_t	*b_hdrp;
11602 
11603 			b_bufp = IDN_OFFSET2ADDR(recl_bufoffset);
11604 			b_hdrp = IDN_BUF2HDR(b_bufp);
11605 
11606 			if (IDN_CKSUM_PKT(b_hdrp) != b_hdrp->b_cksum) {
11607 				IDN_KSTAT_INC(sip, si_crc);
11608 				IDN_KSTAT_INC(sip, si_fcs_errors);
11609 				IDN_KSTAT_INC(sip, si_reclaim);
11610 				IDN_KSTAT_INC(sip, si_oerrors);
11611 			}
11612 
11613 			recl = smr_buf_free(domid, b_bufp, b_hdrp->b_length);
11614 #ifdef DEBUG
11615 			if (recl == 0) {
11616 				PR_DATA("%s: SUCCESSFULLY reclaimed buf "
11617 					"(domain %d)\n", proc, domid);
11618 			} else {
11619 				PR_DATA("%s: WARNING: reclaim failed (FREE) "
11620 					"(domain %d)\n", proc, domid);
11621 			}
11622 #endif /* DEBUG */
11623 		} else {
11624 			IDN_KSTAT_INC(sip, si_smraddr);
11625 			IDN_KSTAT_INC(sip, si_reclaim);
11626 			PR_DATA("%s: WARNING: reclaim failed (BAD OFFSET) "
11627 				"(domain %d)\n", proc, domid);
11628 		}
11629 	}
11630 
11631 	if (*mmp->mm_smr_readyp == 0) {
11632 		mmp->mm_qiput = qi;
11633 		IDN_KSTAT_INC(sip, si_linkdown);
11634 		rv = ENOLINK;
11635 		goto send_err;
11636 	}
11637 
11638 	mqp[qi].ms_flag = IDN_MBOXMSG_FLAG_RECLAIM;
11639 	mqp[qi].ms_bframe = IDN_OFFSET2BFRAME(bufoffset);
11640 	/* membar_stst(); */
11641 	mqp[qi].ms_owner = 1;
11642 
11643 	IDN_MMBOXINDEX_INC(qi);
11644 
11645 	mmp->mm_qiput = qi;
11646 
11647 	mmp->mm_count++;
11648 
11649 	if ((*mmp->mm_smr_readyp) && !(*mmp->mm_smr_activep)) {
11650 		idn_msgtype_t	mt;
11651 
11652 		mt.mt_mtype = IDNP_DATA;
11653 		mt.mt_atype = 0;
11654 		IDN_KSTAT_INC(sip, si_xdcall);
11655 		(void) IDNXDC(domid, &mt, (uint_t)dst.net.chan,
11656 				0, 0, 0);
11657 	}
11658 	mutex_exit(&mmp->mm_mutex);
11659 	IDN_KSTAT_INC(sip, si_opackets);
11660 	IDN_KSTAT_INC(sip, si_opackets64);
11661 	IDN_KSTAT_ADD(sip, si_xmtbytes, pktlen);
11662 	IDN_KSTAT_ADD(sip, si_obytes64, (uint64_t)pktlen);
11663 
11664 	return (0);
11665 
11666 send_err:
11667 	mmp->mm_dropped++;
11668 
11669 	mutex_exit(&mmp->mm_mutex);
11670 
11671 	return (rv);
11672 }
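
/*
 * A minimal sketch of the owner-bit handshake used by the SMR mailbox
 * queue in idn_send_mboxdata() above.  The names here (ex_slot_t,
 * ex_produce, EX_QSIZE) are hypothetical stand-ins, not part of the
 * IDN protocol: the producer may fill a slot only while the owner bit
 * is clear, publishes the buffer frame first, then sets the owner bit
 * last so the consumer never sees a half-written entry.  The consumer
 * clears the owner bit when finished, which is what later makes the
 * slot reclaimable.
 */
#define	EX_QSIZE	8

typedef struct ex_slot {
	uint_t	ex_owner;	/* 1 = slot handed to the consumer */
	uint_t	ex_frame;	/* buffer handle being passed across */
} ex_slot_t;

static int
ex_produce(ex_slot_t *q, int *qiputp, uint_t frame)
{
	int	qi = *qiputp;

	if (q[qi].ex_owner)
		return (ENOSPC);	/* ring is full */

	q[qi].ex_frame = frame;		/* publish the payload first */
	/* a store barrier would go here on weakly ordered hardware */
	q[qi].ex_owner = 1;		/* hand the slot to the consumer */

	*qiputp = (qi + 1) % EX_QSIZE;	/* advance the producer index */
	return (0);
}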
11673 
11674 static int
11675 idn_recv_mboxdata(int channel, caddr_t bufp)
11676 {
11677 	smr_pkthdr_t	*hdrp;
11678 	struct idn	*sip;
11679 	mblk_t		*mp = nilp(mblk_t);
11680 	int		pktlen;
11681 	int		apktlen;
11682 	int		rv = 0;
11683 	smr_offset_t	bufoffset;
11684 	ushort_t	csum;
11685 	idn_netaddr_t	dst, daddr;
11686 	procname_t	proc = "idn_recv_mboxdata";
11687 
11688 	hdrp = IDN_BUF2HDR(bufp);
11689 
11690 	csum = IDN_CKSUM_PKT(hdrp);
11691 
11692 	sip = IDN_INST2SIP(channel);
11693 	if (sip == NULL) {
11694 		/*LINTED*/
11695 		sip = IDN_INST2SIP(0);
11696 	}
11697 	ASSERT(sip);
11698 
11699 	if (csum != hdrp->b_cksum) {
11700 		PR_DATA("%s: bad checksum(%x) != expected(%x)\n",
11701 			proc, (uint_t)csum, (uint_t)hdrp->b_cksum);
11702 		IDN_KSTAT_INC(sip, si_crc);
11703 		IDN_KSTAT_INC(sip, si_fcs_errors);
11704 		rv = -1;
11705 		goto recv_err;
11706 	}
11707 
11708 	daddr.net.chan = (ushort_t)channel;
11709 	daddr.net.netid = (ushort_t)idn.localid;
11710 
11711 	dst.netaddr = hdrp->b_netaddr;
11712 	bufoffset = hdrp->b_offset;
11713 
11714 	if (dst.netaddr != daddr.netaddr) {
11715 		PR_DATA("%s: wrong dest netaddr (0x%x), expected (0x%x)\n",
11716 			proc, dst.netaddr, daddr.netaddr);
11717 		IDN_KSTAT_INC(sip, si_nolink);
11718 		IDN_KSTAT_INC(sip, si_macrcv_errors);
11719 		goto recv_err;
11720 	}
11721 	pktlen  = hdrp->b_length;
11722 	apktlen = pktlen;
11723 
11724 	if ((pktlen <= 0) || (pktlen > IDN_DATA_SIZE)) {
11725 		PR_DATA("%s: invalid packet length (%d) <= 0 || > %lu\n",
11726 			proc, pktlen, IDN_DATA_SIZE);
11727 		IDN_KSTAT_INC(sip, si_buff);
11728 		IDN_KSTAT_INC(sip, si_toolong_errors);
11729 		goto recv_err;
11730 	}
11731 
11732 	mp = allocb(apktlen + IDN_ALIGNSIZE, BPRI_LO);
11733 	if (mp == nilp(mblk_t)) {
11734 		PR_DATA("%s: allocb(pkt) failed\n", proc);
11735 		IDN_KSTAT_INC(sip, si_allocbfail);
11736 		IDN_KSTAT_INC(sip, si_norcvbuf);	/* MIB II */
11737 		goto recv_err;
11738 	}
11739 	ASSERT(DB_TYPE(mp) == M_DATA);
11740 	/*
11741 	 * Copy the data packet into its STREAMS buffer.
11742 	 * Align pointers for maximum bcopy performance.
11743 	 */
11744 	mp->b_rptr = (uchar_t *)IDN_ALIGNPTR(mp->b_rptr, bufoffset);
11745 	bcopy(IDN_BUF2DATA(bufp, bufoffset), mp->b_rptr, apktlen);
11746 	mp->b_wptr = mp->b_rptr + pktlen;
11747 
11748 	if (IDN_CHECKSUM &&
11749 		!idn_valid_etherheader((struct ether_header *)mp->b_rptr)) {
11750 		freeb(mp);
11751 		mp = nilp(mblk_t);
11752 		PR_DATA("%s: etherheader CORRUPTED\n", proc);
11753 		IDN_KSTAT_INC(sip, si_crc);
11754 		IDN_KSTAT_INC(sip, si_fcs_errors);
11755 		rv = -1;
11756 		goto recv_err;
11757 	}
11758 
11759 	idndl_read(NULL, mp);
11760 
11761 recv_err:
11762 
11763 	if (mp == nilp(mblk_t)) {
11764 		IDN_KSTAT_INC(sip, si_ierrors);
11765 	}
11766 
11767 	return (rv);
11768 }
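
/*
 * A minimal sketch of the pointer-alignment idea behind IDN_ALIGNPTR()
 * as used in idn_recv_mboxdata() above.  The assumption here (the real
 * definition lives in the IDN headers) is that the destination read
 * pointer is advanced so its low-order bits match the source buffer
 * offset, letting bcopy() move naturally aligned chunks; the slack for
 * the adjustment is the extra IDN_ALIGNSIZE bytes passed to allocb().
 * ex_align_like_source() and its arguments are hypothetical names.
 */
static uchar_t *
ex_align_like_source(uchar_t *dst, uintptr_t srcoff, size_t align)
{
	uintptr_t	want = srcoff & (align - 1);
	uintptr_t	have = (uintptr_t)dst & (align - 1);

	/*
	 * Move dst forward (by fewer than 'align' bytes, where 'align'
	 * is a power of two) until its low-order bits equal those of
	 * the source offset.
	 */
	return (dst + ((want - have) & (align - 1)));
}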
11769 
11770 /*
11771  * When on the shutdown path (idn_active_resources), the caller must
11772  * call idn_mainmbox_flush() _BEFORE_ calling idn_reclaim_mboxdata()
11773  * for any final data.  This is necessary in case the mailboxes have
11774  * been unregistered.  If they have, then idn_mainmbox_flush() will
11775  * have set mm_smr_mboxp to NULL, which prevents us from touching
11776  * poisoned SMR space.
11777  */
11778 int
11779 idn_reclaim_mboxdata(int domid, int channel, int nbufs)
11780 {
11781 	idn_mainmbox_t	*mmp;
11782 	idn_mboxmsg_t	*mqp;
11783 	smr_pkthdr_t	*hdrp;
11784 	idn_domain_t	*dp;
11785 	int		qi;
11786 	int		mi;
11787 	int		reclaim_cnt = 0;
11788 	int		free_cnt;
11789 	ushort_t	csum;
11790 	struct idn	*sip;
11791 	smr_offset_t	reclaim_list, curr, prev;
11792 	procname_t	proc = "idn_reclaim_mboxdata";
11793 
11794 
11795 	sip = IDN_INST2SIP(channel);
11796 	if (sip == NULL) {
11797 		/*LINTED*/
11798 		sip = IDN_INST2SIP(0);
11799 	}
11800 	ASSERT(sip);
11801 
11802 	dp = &idn_domain[domid];
11803 
11804 	PR_DATA("%s: requested %d buffers from domain %d\n",
11805 		proc, nbufs, domid);
11806 
11807 	if (lock_try(&dp->dreclaim_inprogress) == 0) {
11808 		/*
11809 		 * Reclaim is already in progress, don't
11810 		 * bother.
11811 		 */
11812 		PR_DATA("%s: reclaim already in progress\n", proc);
11813 		return (0);
11814 	}
11815 
11816 	if (dp->dmbox.m_send == NULL)
11817 		return (0);
11818 
11819 	reclaim_list = curr = prev = IDN_NIL_SMROFFSET;
11820 
11821 	mi = (int)dp->dreclaim_index;
11822 	do {
11823 		ushort_t	mbox_csum;
11824 
11825 		mmp = &dp->dmbox.m_send[mi];
11826 		/* do-while continues down */
11827 		ASSERT(mmp);
11828 		if (mutex_tryenter(&mmp->mm_mutex) == 0) {
11829 			/*
11830 			 * This channel is busy, move on.
11831 			 */
11832 			IDN_MBOXCHAN_INC(mi);
11833 			continue;
11834 		}
11835 
11836 		if (mmp->mm_smr_mboxp == NULL) {
11837 			PR_DATA("%s: no smr pointer for domid %d, chan %d\n",
11838 				proc, domid, (int)mmp->mm_channel);
11839 			ASSERT(mmp->mm_qiget == mmp->mm_qiput);
11840 			mutex_exit(&mmp->mm_mutex);
11841 			IDN_MBOXCHAN_INC(mi);
11842 			continue;
11843 		}
11844 		mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
11845 		if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
11846 			PR_DATA("%s: (d %d, chn %d) mbox hdr "
11847 				"cksum (%d) != actual (%d)\n",
11848 				proc, domid, (int)mmp->mm_channel, mbox_csum,
11849 				mmp->mm_smr_mboxp->mt_header.mh_cksum);
11850 			IDN_KSTAT_INC(sip, si_mboxcrc);
11851 			IDN_KSTAT_INC(sip, si_oerrors);
11852 			mutex_exit(&mmp->mm_mutex);
11853 			IDN_MBOXCHAN_INC(mi);
11854 			continue;
11855 		}
11856 		mqp = &mmp->mm_smr_mboxp->mt_queue[0];
11857 		qi  = mmp->mm_qiget;
11858 
11859 		while (!mqp[qi].ms_owner &&
11860 			(mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) &&
11861 			nbufs) {
11862 			idn_mboxmsg_t	*msp;
11863 			int		badbuf;
11864 
11865 			badbuf = 0;
11866 			msp = &mqp[qi];
11867 
11868 			if (msp->ms_flag & IDN_MBOXMSG_FLAG_ERRMASK) {
11869 				PR_DATA("%s: msg.flag ERROR(0x%x) (off=0x%x, "
11870 					"domid=%d, qiget=%d)\n", proc,
11871 					(uint_t)(msp->ms_flag &
11872 						IDN_MBOXMSG_FLAG_ERRMASK),
11873 					IDN_BFRAME2OFFSET(msp->ms_bframe),
11874 					domid, qi);
11875 			}
11876 			prev = curr;
11877 			curr = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
11878 
11879 			if (!VALID_NWROFFSET(curr, IDN_SMR_BUFSIZE)) {
11880 				badbuf = 1;
11881 				IDN_KSTAT_INC(sip, si_reclaim);
11882 			} else {
11883 				/*
11884 				 * Put the buffers onto a list that will be
11885 				 * formally reclaimed down below.  This allows
11886 				 * us to free up mboxq entries as fast as
11887 				 * possible.
11888 				 */
11889 				hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(curr));
11890 				csum = IDN_CKSUM_PKT(hdrp);
11891 
11892 				if (csum != hdrp->b_cksum) {
11893 					badbuf = 1;
11894 					IDN_KSTAT_INC(sip, si_crc);
11895 					IDN_KSTAT_INC(sip, si_fcs_errors);
11896 					IDN_KSTAT_INC(sip, si_reclaim);
11897 					if (!(mmp->mm_flags &
11898 						IDNMMBOX_FLAG_CORRUPTED)) {
11899 						cmn_err(CE_WARN,
11900 							"IDN: 241: [send] "
11901 							"(domain %d, channel "
11902 							"%d) SMR CORRUPTED - "
11903 							"RELINK",
11904 							domid, channel);
11905 						mmp->mm_flags |=
11906 							IDNMMBOX_FLAG_CORRUPTED;
11907 					}
11908 
11909 				} else if (reclaim_list == IDN_NIL_SMROFFSET) {
11910 					reclaim_list = curr;
11911 				} else {
11912 					caddr_t	bufp;
11913 
11914 					bufp = IDN_OFFSET2ADDR(prev);
11915 					hdrp = IDN_BUF2HDR(bufp);
11916 					hdrp->b_next = curr;
11917 				}
11918 			}
11919 
11920 			mqp[qi].ms_flag = 0;
11921 
11922 			IDN_MMBOXINDEX_INC(qi);
11923 
11924 			if (!badbuf) {
11925 				nbufs--;
11926 				reclaim_cnt++;
11927 			}
11928 
11929 			if (qi == mmp->mm_qiget)
11930 				break;
11931 		}
11932 		mmp->mm_qiget = qi;
11933 
11934 		mutex_exit(&mmp->mm_mutex);
11935 
11936 		IDN_MBOXCHAN_INC(mi);
11937 
11938 	} while ((mi != (int)dp->dreclaim_index) && nbufs);
11939 
11940 	dp->dreclaim_index = (uchar_t)mi;
11941 
11942 	if (reclaim_list != IDN_NIL_SMROFFSET) {
11943 		hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(curr));
11944 		hdrp->b_next = IDN_NIL_SMROFFSET;
11945 	}
11946 
11947 	PR_DATA("%s: reclaimed %d buffers from domain %d\n",
11948 		proc, reclaim_cnt, domid);
11949 
11950 	if (reclaim_cnt == 0) {
11951 		lock_clear(&dp->dreclaim_inprogress);
11952 		return (0);
11953 	}
11954 
11955 	/*
11956 	 * Now actually go and reclaim (free) the buffers.
11957 	 */
11958 	free_cnt = 0;
11959 
11960 	for (curr = reclaim_list; curr != IDN_NIL_SMROFFSET; ) {
11961 		caddr_t		bufp;
11962 
11963 		bufp = IDN_OFFSET2ADDR(curr);
11964 		hdrp = IDN_BUF2HDR(bufp);
11965 		csum = IDN_CKSUM_PKT(hdrp);
11966 		if (csum != hdrp->b_cksum) {
11967 			/*
11968 			 * Once corruption is detected we
11969 			 * can't trust our list any further.
11970 			 * These buffers are effectively lost.
11971 			 */
11972 			cmn_err(CE_WARN,
11973 				"IDN: 241: [send] (domain %d, channel %d) SMR "
11974 				"CORRUPTED - RELINK", domid, channel);
11975 			break;
11976 		}
11977 
11978 		curr = hdrp->b_next;
11979 
11980 		if (!smr_buf_free(domid, bufp, hdrp->b_length))
11981 			free_cnt++;
11982 	}
11983 
11984 	if ((dp->dio < IDN_WINDOW_EMAX) && dp->diocheck) {
11985 		lock_clear(&dp->diocheck);
11986 		IDN_MSGTIMER_STOP(domid, IDNP_DATA, 0);
11987 	}
11988 
11989 #ifdef DEBUG
11990 	if (free_cnt != reclaim_cnt) {
11991 		PR_DATA("%s: *** WARNING *** freecnt(%d) != reclaim_cnt (%d)\n",
11992 			proc, free_cnt, reclaim_cnt);
11993 	}
11994 #endif /* DEBUG */
11995 
11996 	lock_clear(&dp->dreclaim_inprogress);
11997 
11998 	return (reclaim_cnt);
11999 }
12000 
12001 void
12002 idn_signal_data_server(int domid, ushort_t channel)
12003 {
12004 	idn_nack_t	nacktype = 0;
12005 	idn_domain_t	*dp;
12006 	idn_chansvr_t	*csp;
12007 	int		c, min_chan, max_chan;
12008 	idn_mainmbox_t	*mmp;
12009 	procname_t	proc = "idn_signal_data_server";
12010 
12011 
12012 	if (domid == IDN_NIL_DOMID)
12013 		return;
12014 
12015 	dp = &idn_domain[domid];
12016 
12017 	if (dp->dawol.a_count > 0) {
12018 		/*
12019 		 * Domain was previously AWOL, but no longer.
12020 		 */
12021 		IDN_SYNC_LOCK();
12022 		IDN_GLOCK_EXCL();
12023 		idn_clear_awol(domid);
12024 		IDN_GUNLOCK();
12025 		IDN_SYNC_UNLOCK();
12026 	}
12027 	/*
12028 	 * Do a precheck before wasting time trying to acquire the lock.
12029 	 */
12030 	if ((dp->dstate != IDNDS_CONNECTED) || !IDN_DLOCK_TRY_SHARED(domid)) {
12031 		/*
12032 		 * Either we're not connected or somebody is busy working
12033 		 * on the domain.  Bail on the signal for now; we'll catch
12034 		 * it on the next go-around.
12035 		 */
12036 		return;
12037 	}
12038 	/*
12039 	 * We didn't have the drwlock on the first check of dstate,
12040 	 * but now that we do, make sure the world hasn't changed!
12041 	 */
12042 	if (dp->dstate != IDNDS_CONNECTED) {
12043 		/*
12044 		 * If we reach here, the connection is gone,
12045 		 * so send back a no-connection NACK.
12046 		 */
12047 		nacktype = IDNNACK_NOCONN;
12048 		goto send_dresp;
12049 	}
12050 
12051 	/*
12052 	 * No need to worry about locking the mainmbox
12053 	 * because we're already holding the reader
12054 	 * lock on the domain, plus we're just reading
12055 	 * fields in the mainmbox which only change
12056 	 * (or go away) when the writer lock is
12057 	 * held on the domain.
12058 	 */
12059 	if ((mmp = dp->dmbox.m_recv) == NULL) {
12060 		/*
12061 		 * No local mailbox.
12062 		 */
12063 		nacktype = IDNNACK_BADCFG;
12064 		goto send_dresp;
12065 	}
12066 	if ((channel != IDN_BROADCAST_ALLCHAN) && (channel >= IDN_MAX_NETS)) {
12067 		nacktype = IDNNACK_BADCHAN;
12068 		goto send_dresp;
12069 	}
12070 	if (channel == IDN_BROADCAST_ALLCHAN) {
12071 		PR_DATA("%s: requested signal to ALL channels on domain %d\n",
12072 			proc, domid);
12073 		min_chan = 0;
12074 		max_chan = IDN_MAX_NETS - 1;
12075 	} else {
12076 		PR_DATA("%s: requested signal to channel %d on domain %d\n",
12077 			proc, channel, domid);
12078 		min_chan = max_chan = (int)channel;
12079 	}
12080 	mmp += min_chan;
12081 	for (c = min_chan; c <= max_chan; mmp++, c++) {
12082 
12083 		/*
12084 		 * We do a quick check for a pending channel.
12085 		 * If pending, it will need activation, and we'd rather
12086 		 * do that through a separate (proto) thread.
12087 		 */
12088 		csp = &idn.chan_servers[c];
12089 
12090 		if (csp->ch_recv.c_checkin) {
12091 			PR_DATA("%s: chansvr (%d) for domid %d CHECK-IN\n",
12092 				proc, c, domid);
12093 			continue;
12094 		}
12095 
12096 		if (IDN_CHAN_TRYLOCK_RECV(csp) == 0) {
12097 			/*
12098 			 * Failed to grab lock, server must be active.
12099 			 */
12100 			PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
12101 				proc, c, domid);
12102 			continue;
12103 		}
12104 
12105 		if (IDN_CHANNEL_IS_PENDING(csp)) {
12106 			/*
12107 			 * Lock is pending.  Submit asynchronous
12108 			 * job to activate it and move on.
12109 			 */
12110 			IDN_CHAN_UNLOCK_RECV(csp);
12111 			idn_submit_chanactivate_job(c);
12112 			continue;
12113 		}
12114 
12115 		/*
12116 		 * If he ain't active, we ain't talkin'.
12117 		 */
12118 		if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) == 0) {
12119 			IDN_CHAN_UNLOCK_RECV(csp);
12120 			PR_DATA("%s: chansvr (%d) for domid %d inactive\n",
12121 				proc, c, domid);
12122 			continue;
12123 		}
12124 
12125 		if (mutex_tryenter(&mmp->mm_mutex) == 0) {
12126 			IDN_CHAN_UNLOCK_RECV(csp);
12127 			continue;
12128 		}
12129 
12130 		if (mmp->mm_csp != csp) {
12131 			/*
12132 			 * Not registered.
12133 			 */
12134 			mutex_exit(&mmp->mm_mutex);
12135 			IDN_CHAN_UNLOCK_RECV(csp);
12136 			continue;
12137 
12138 		}
12139 		if (mmp->mm_smr_mboxp == NULL) {
12140 			/*
12141 			 * No SMR mailbox.
12142 			 */
12143 			mutex_exit(&mmp->mm_mutex);
12144 			IDN_CHAN_UNLOCK_RECV(csp);
12145 			continue;
12146 		}
12147 		mutex_exit(&mmp->mm_mutex);
12148 
12149 		if (csp->ch_recv.c_inprogress) {
12150 			/*
12151 			 * Data server is already active.
12152 			 */
12153 			IDN_CHAN_UNLOCK_RECV(csp);
12154 			PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
12155 				proc, c, domid);
12156 			continue;
12157 		}
12158 		ASSERT(csp == &idn.chan_servers[c]);
12159 
12160 
12161 		PR_DATA("%s: signaling data dispatcher for chan %d dom %d\n",
12162 			proc, c, domid);
12163 		ASSERT(csp);
12164 		cv_signal(&csp->ch_recv_cv);
12165 		IDN_CHAN_UNLOCK_RECV(csp);
12166 	}
12167 
12168 	if (!nacktype || (channel == IDN_BROADCAST_ALLCHAN)) {
12169 		/*
12170 		 * If there were no real errors or we were
12171 		 * handling multiple channels, then just
12172 		 * return.
12173 		 */
12174 		IDN_DUNLOCK(domid);
12175 		return;
12176 	}
12177 
12178 send_dresp:
12179 
12180 	PR_DATA("%s: sending NACK (%s) back to domain %d (cpu %d)\n",
12181 		proc, idnnack_str[nacktype], domid, idn_domain[domid].dcpu);
12182 
12183 	idn_send_dataresp(domid, nacktype);
12184 
12185 	IDN_DUNLOCK(domid);
12186 }
12187 
12188 /*ARGSUSED*/
12189 static int
12190 idn_recv_data(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
12191 {
12192 #ifdef DEBUG
12193 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
12194 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
12195 	procname_t	proc = "idn_recv_data";
12196 
12197 	PR_PROTO("%s:%d: DATA message received (msg = 0x%x, msgarg = 0x%x)\n",
12198 		proc, domid, msg, msgarg);
12199 	PR_PROTO("%s:%d: xargs = (0x%x, 0x%x, 0x%x, 0x%x)\n",
12200 		proc, domid, xargs[0], xargs[1], xargs[2], xargs[3]);
12201 #endif /* DEBUG */
12202 
12203 	return (0);
12204 }
12205 
12206 /*
12207  * Only used when sending a negative response.
12208  */
12209 static void
12210 idn_send_dataresp(int domid, idn_nack_t nacktype)
12211 {
12212 	idn_msgtype_t	mt;
12213 
12214 	ASSERT(IDN_DLOCK_IS_HELD(domid));
12215 
12216 	if (idn_domain[domid].dcpu == IDN_NIL_DCPU)
12217 		return;
12218 
12219 	mt.mt_mtype = IDNP_NACK;
12220 	mt.mt_atype = IDNP_DATA;
12221 
12222 	(void) IDNXDC(domid, &mt, (uint_t)nacktype, 0, 0, 0);
12223 }
12224 
12225 /*
12226  * Checksum routine used to checksum the smr_pkthdr_t and idn_mboxhdr_t.
12227  */
12228 static ushort_t
12229 idn_cksum(register ushort_t *hdrp, register int count)
12230 {
12231 	register int		i;
12232 	register ushort_t	sum = 0;
12233 
12234 	for (i = 0; i < count; i++)
12235 		sum += hdrp[i];
12236 
12237 	sum = (sum >> 16) + (sum & 0xffff);
12238 	sum += (sum >> 16);
12239 
12240 	return (~sum);
12241 }
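
/*
 * A small worked example of how a one's-complement checksum like the
 * one above is typically produced and verified.  The ex_hdr_t layout
 * and the zero-the-field-before-summing convention are illustrative
 * assumptions; they are not necessarily how IDN_CKSUM_PKT() and
 * IDN_CKSUM_MBOX() carve up their headers.
 */
typedef struct ex_hdr {
	ushort_t	xh_cksum;	/* filled in last */
	ushort_t	xh_len;
	ushort_t	xh_chan;
	ushort_t	xh_netid;
} ex_hdr_t;

static void
ex_hdr_seal(ex_hdr_t *hp)
{
	hp->xh_cksum = 0;
	hp->xh_cksum = idn_cksum((ushort_t *)hp,
				sizeof (ex_hdr_t) / sizeof (ushort_t));
}

static int
ex_hdr_ok(ex_hdr_t *hp)
{
	ex_hdr_t	tmp = *hp;

	tmp.xh_cksum = 0;
	return (idn_cksum((ushort_t *)&tmp,
		sizeof (ex_hdr_t) / sizeof (ushort_t)) == hp->xh_cksum);
}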
12242 
12243 /*
12244  * ------------------------------------------------
12245  */
12246 
12247 int
12248 idn_open_channel(int channel)
12249 {
12250 	int		masterid;
12251 	idn_chansvr_t	*csp;
12252 	struct idn	*sip;
12253 	procname_t	proc = "idn_open_channel";
12254 
12255 	if (channel >= IDN_MAX_NETS) {
12256 		cmn_err(CE_WARN,
12257 			"IDN: 242: maximum channels (%d) already open",
12258 			IDN_MAX_NETS);
12259 		return (-1);
12260 	}
12261 	IDN_GLOCK_EXCL();
12262 
12263 	ASSERT(idn.chan_servers != NULL);
12264 
12265 	csp = &idn.chan_servers[channel];
12266 
12267 	IDN_CHAN_LOCK_GLOBAL(csp);
12268 
12269 	if (IDN_CHANNEL_IS_ATTACHED(csp)) {
12270 		PR_CHAN("%s: channel %d already open\n", proc, channel);
12271 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12272 		IDN_GUNLOCK();
12273 		return (0);
12274 	}
12275 
12276 	/*
12277 	 * Need to zero out the kstats now that we're activating
12278 	 * this channel.
12279 	 */
12280 	for (sip = idn.sip; sip; sip = sip->si_nextp) {
12281 		if (sip->si_dip && (ddi_get_instance(sip->si_dip) == channel)) {
12282 			bzero(&sip->si_kstat, sizeof (sip->si_kstat));
12283 			break;
12284 		}
12285 	}
12286 
12287 	IDN_CHANSVC_MARK_ATTACHED(csp);
12288 	idn.nchannels++;
12289 	CHANSET_ADD(idn.chanset, channel);
12290 	IDN_CHANNEL_ATTACH(channel);
12291 
12292 	IDN_CHAN_UNLOCK_GLOBAL(csp);
12293 
12294 	/*
12295 	 * We increase our window threshold each time a channel
12296 	 * is opened.
12297 	 */
12298 	ASSERT(idn.nchannels > 0);
12299 	IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
12300 				((idn.nchannels - 1) * IDN_WINDOW_INCR);
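	/*
	 * For example, with hypothetical values of IDN_WINDOW_MAX = 32
	 * and IDN_WINDOW_INCR = 8 (the real values come from the IDN
	 * headers), one open channel leaves the threshold at 32, while
	 * a third open channel raises it to 32 + (2 * 8) = 48.
	 */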
12301 
12302 	PR_CHAN("%s: channel %d is OPEN (nchannels = %d)\n",
12303 		proc, channel, idn.nchannels);
12304 
12305 	masterid = IDN_GET_MASTERID();
12306 	IDN_GUNLOCK();
12307 
12308 	/*
12309 	 * Check if there is an active master to which
12310 	 * we're connected.  If so, then activate channel.
12311 	 */
12312 	if (masterid != IDN_NIL_DOMID) {
12313 		idn_domain_t	*dp;
12314 
12315 		dp = &idn_domain[masterid];
12316 		IDN_DLOCK_SHARED(masterid);
12317 		if (dp->dvote.v.master && (dp->dstate == IDNDS_CONNECTED))
12318 			(void) idn_activate_channel(CHANSET(channel),
12319 							IDNCHAN_ONLINE);
12320 		IDN_DUNLOCK(masterid);
12321 	}
12322 
12323 	return (0);
12324 }
12325 
12326 void
12327 idn_close_channel(int channel, idn_chanop_t chanop)
12328 {
12329 	idn_chansvr_t	*csp;
12330 	procname_t	proc = "idn_close_channel";
12331 
12332 
12333 	ASSERT(idn.chan_servers != NULL);
12334 
12335 	csp = &idn.chan_servers[channel];
12336 
12337 	IDN_GLOCK_EXCL();
12338 
12339 	IDN_CHAN_LOCK_GLOBAL(csp);
12340 	if (IDN_CHANNEL_IS_DETACHED(csp)) {
12341 		PR_CHAN("%s: channel %d already closed\n", proc, channel);
12342 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12343 		IDN_GUNLOCK();
12344 		return;
12345 	}
12346 	IDN_CHAN_UNLOCK_GLOBAL(csp);
12347 
12348 	idn_deactivate_channel(CHANSET(channel), chanop);
12349 
12350 	IDN_CHAN_LOCK_GLOBAL(csp);
12351 
12352 	if (chanop == IDNCHAN_HARD_CLOSE) {
12353 		idn.nchannels--;
12354 		CHANSET_DEL(idn.chanset, channel);
12355 		/*
12356 		 * Recompute the window threshold now that a channel
12357 		 * has been (hard) closed.
12358 		 */
12359 		if (idn.nchannels <= 0)
12360 			IDN_WINDOW_EMAX = 0;
12361 		else
12362 			IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
12363 					((idn.nchannels - 1) * IDN_WINDOW_INCR);
12364 	}
12365 
12366 	PR_CHAN("%s: channel %d is (%s) CLOSED (nchannels = %d)\n",
12367 		proc, channel,
12368 		(chanop == IDNCHAN_SOFT_CLOSE) ? "SOFT"
12369 		: (chanop == IDNCHAN_HARD_CLOSE) ? "HARD" : "OFFLINE",
12370 		idn.nchannels);
12371 
12372 	IDN_CHAN_UNLOCK_GLOBAL(csp);
12373 	IDN_GUNLOCK();
12374 }
12375 
12376 static int
12377 idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
12378 {
12379 	int		c, rv = 0;
12380 	procname_t	proc = "idn_activate_channel";
12381 
12382 	PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
12383 		proc, chanset, chanop_str[chanop]);
12384 
12385 	if (idn.state != IDNGS_ONLINE) {
12386 		/*
12387 		 * Can't activate any channels unless local
12388 		 * domain is connected and thus has a master.
12389 		 */
12390 		PR_CHAN("%s: local domain not connected.  no data servers\n",
12391 			proc);
12392 		return (-1);
12393 	}
12394 
12395 	for (c = 0; c < IDN_MAX_NETS; c++) {
12396 		idn_chansvr_t	*csp;
12397 		idn_mboxhdr_t	*mainhp;
12398 		struct idn	*sip;
12399 
12400 		if (!CHAN_IN_SET(chanset, c))
12401 			continue;
12402 		csp = &idn.chan_servers[c];
12403 
12404 		if (chanop == IDNCHAN_ONLINE) {
12405 			IDN_CHAN_LOCK_GLOBAL(csp);
12406 		} else {
12407 			/*
12408 			 * We don't wait to grab the global lock
12409 			 * if IDNCHAN_OPEN since these occur along
12410 			 * critical data paths and will be retried
12411 			 * anyway if needed.
12412 			 */
12413 			if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
12414 				PR_CHAN("%s: failed to acquire global "
12415 					"lock for channel %d\n",
12416 					proc, c);
12417 				continue;
12418 			}
12419 		}
12420 
12421 		if (!IDN_CHANNEL_IS_ATTACHED(csp)) {
12422 			PR_CHAN("%s: channel %d NOT open\n", proc, c);
12423 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12424 			continue;
12425 
12426 		}
12427 
12428 		if (IDN_CHANNEL_IS_ACTIVE(csp)) {
12429 
12430 			PR_CHAN("%s: channel %d already active\n", proc, c);
12431 			rv++;
12432 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12433 			continue;
12434 
12435 		}
12436 		/*
12437 		 * Channel activation can happen asynchronously.
12438 		 */
12439 		IDN_CHANNEL_SUSPEND(c, 0);
12440 
12441 		if (IDN_CHANNEL_IS_PENDING(csp) && (chanop == IDNCHAN_OPEN)) {
12442 
12443 			PR_CHAN("%s: ACTIVATING channel %d\n", proc, c);
12444 
12445 			if (idn_activate_channel_services(c) >= 0) {
12446 				PR_CHAN("%s: Setting channel %d ACTIVE\n",
12447 					proc, c);
12448 				IDN_CHANSVC_MARK_ACTIVE(csp);
12449 				rv++;
12450 			}
12451 		} else if (!IDN_CHANNEL_IS_PENDING(csp) &&
12452 					(chanop == IDNCHAN_ONLINE)) {
12453 			PR_CHAN("%s: Setting channel %d PENDING\n", proc, c);
12454 
12455 			IDN_CHANSVC_MARK_PENDING(csp);
12456 		}
12457 		/*
12458 		 * Don't syncheader (i.e. touch SMR) unless
12459 		 * channel is at least ENABLED.  For a DISABLED
12460 		 * channel, the SMR may be invalid so do NOT
12461 		 * touch it.
12462 		 */
12463 		if (IDN_CHANNEL_IS_ENABLED(csp) &&
12464 			((mainhp = idn_chan_server_syncheader(c)) != NULL)) {
12465 			PR_CHAN("%s: marking chansvr (mhp=0x%p) %d READY\n",
12466 				proc, mainhp, c);
12467 			mainhp->mh_svr_ready = 1;
12468 		}
12469 
12470 		IDN_CHANNEL_RESUME(c);
12471 		sip = IDN_INST2SIP(c);
12472 		ASSERT(sip);
12473 		if (sip->si_wantw) {
12474 			mutex_enter(&idn.sipwenlock);
12475 			idndl_wenable(sip);
12476 			mutex_exit(&idn.sipwenlock);
12477 		}
12478 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12479 
12480 	}
12481 	/*
12482 	 * Returns "not active": a non-zero value indicates no
12483 	 * channels were activated; zero means at least one was.
12484 	 */
12485 	return (rv == 0);
12486 }
12487 
12488 static void
12489 idn_deactivate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
12490 {
12491 	int		c;
12492 	procname_t	proc = "idn_deactivate_channel";
12493 
12494 
12495 	PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
12496 		proc, chanset, chanop_str[chanop]);
12497 
12498 	for (c = 0; c < IDN_MAX_NETS; c++) {
12499 		idn_chansvr_t	*csp;
12500 		idn_mboxhdr_t	*mainhp;
12501 
12502 		if (!CHAN_IN_SET(chanset, c))
12503 			continue;
12504 
12505 		csp = &idn.chan_servers[c];
12506 
12507 		IDN_CHAN_LOCK_GLOBAL(csp);
12508 
12509 		if (((chanop == IDNCHAN_SOFT_CLOSE) &&
12510 				!IDN_CHANNEL_IS_ACTIVE(csp)) ||
12511 			((chanop == IDNCHAN_HARD_CLOSE) &&
12512 				IDN_CHANNEL_IS_DETACHED(csp)) ||
12513 			((chanop == IDNCHAN_OFFLINE) &&
12514 				!IDN_CHANNEL_IS_ENABLED(csp))) {
12515 
12516 			ASSERT(!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
12517 			ASSERT(!IDN_CHANNEL_IS_SEND_ACTIVE(csp));
12518 
12519 			PR_CHAN("%s: channel %d already deactivated\n",
12520 				proc, c);
12521 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12522 			continue;
12523 		}
12524 
12525 		switch (chanop) {
12526 		case IDNCHAN_OFFLINE:
12527 			IDN_CHANSVC_MARK_IDLE(csp);
12528 			IDN_CHANSVC_MARK_DISABLED(csp);
12529 			IDN_CHANNEL_STOP(c, 1);
12530 			mainhp = idn_chan_server_syncheader(c);
12531 			if (mainhp != NULL)
12532 				mainhp->mh_svr_ready = 0;
12533 			break;
12534 
12535 		case IDNCHAN_HARD_CLOSE:
12536 			IDN_CHANSVC_MARK_DETACHED(csp);
12537 			IDN_CHANNEL_DETACH(c, 1);
12538 			mainhp = idn_chan_server_syncheader(c);
12539 			if (mainhp != NULL)
12540 				mainhp->mh_svr_ready = 0;
12541 			break;
12542 
12543 		default:
12544 			IDN_CHANSVC_MARK_IDLE(csp);
12545 			IDN_CHANNEL_SUSPEND(c, 1);
12546 			ASSERT(IDN_CHANNEL_IS_ATTACHED(csp));
12547 			break;
12548 		}
12549 
12550 		lock_clear(&csp->ch_actvlck);
12551 		lock_clear(&csp->ch_initlck);
12552 
12553 		PR_CHAN("%s: DEACTIVATING channel %d (%s)\n", proc, c,
12554 			chanop_str[chanop]);
12555 		PR_CHAN("%s: removing chanset 0x%x data svrs for "
12556 			"each domain link\n", proc, chanset);
12557 
12558 		(void) idn_deactivate_channel_services(c, chanop);
12559 	}
12560 	/*
12561 	 * Returns with channels unlocked.
12562 	 */
12563 }
12564 
12565 /*
12566  * The priority of the channel server must be less than that
12567  * of the protocol server since the protocol server tasks
12568  * are (can be) of more importance.
12569  *
12570  * Possible range: 60-99.
12571  */
12572 static pri_t	idn_chansvr_pri = (7 * MAXCLSYSPRI) / 8;
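/*
 * Assuming the usual MAXCLSYSPRI value of 99, the expression above
 * works out to priority 86, which falls inside the 60-99 range noted
 * in the preceding comment.
 */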
12573 
12574 static int
12575 idn_activate_channel_services(int channel)
12576 {
12577 	idn_chansvr_t	*csp;
12578 	procname_t	proc = "idn_activate_channel_services";
12579 
12580 
12581 	ASSERT((channel >= 0) && (channel < IDN_MAX_NETS));
12582 
12583 	csp = &idn.chan_servers[channel];
12584 
12585 	ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
12586 	ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
12587 
12588 	if (csp->ch_recv_threadp) {
12589 		/*
12590 		 * There's an existing dispatcher!
12591 		 * Must have been idle'd during an earlier
12592 		 * stint.
12593 		 */
12594 		ASSERT(csp->ch_id == (uchar_t)channel);
12595 		PR_CHAN("%s: existing chansvr FOUND for (c=%d)\n",
12596 			proc, channel);
12597 
12598 		if (IDN_CHANNEL_IS_PENDING(csp) == 0)
12599 			return (-1);
12600 
12601 		PR_CHAN("%s: chansvr (c=%d) Rstate = 0x%x, Sstate = 0x%x\n",
12602 			proc, channel, csp->ch_recv.c_state,
12603 			csp->ch_send.c_state);
12604 
12605 		cv_signal(&csp->ch_recv_cv);
12606 
12607 		return (0);
12608 	}
12609 
12610 	if (IDN_CHANNEL_IS_PENDING(csp) == 0)
12611 		return (-1);
12612 
12613 	csp->ch_id = (uchar_t)channel;
12614 
12615 	PR_CHAN("%s: init channel %d server\n", proc, channel);
12616 
12617 	csp->ch_recv_morguep = GETSTRUCT(ksema_t, 1);
12618 	sema_init(csp->ch_recv_morguep, 0, NULL, SEMA_DRIVER, NULL);
12619 
12620 	csp->ch_recv.c_inprogress = 0;
12621 	csp->ch_recv.c_waiters = 0;
12622 	csp->ch_recv.c_checkin = 0;
12623 	csp->ch_recv_changed = 1;
12624 
12625 	csp->ch_recv_domset = csp->ch_reg_domset;
12626 
12627 	csp->ch_recv_waittime = IDN_NETSVR_WAIT_MIN;
12628 
12629 	csp->ch_recv_threadp = thread_create(NULL, 0,
12630 	    idn_chan_server, &csp, sizeof (csp), &p0, TS_RUN, idn_chansvr_pri);
12631 
12632 	csp->ch_send.c_inprogress = 0;
12633 	csp->ch_send.c_waiters = 0;
12634 	csp->ch_send.c_checkin = 0;
12635 
12636 	return (0);
12637 }
12638 
12639 /*
12640  * This routine can handle terminating a set of channel
12641  * servers all at once, however currently only used
12642  * for serial killing, i.e. one-at-a-time.
12643  *
12644  * Entered with RECV locks held on chanset.
12645  * Acquires SEND locks if needed.
12646  * Leaves with all RECV and SEND locks dropped.
12647  */
12648 static int
12649 idn_deactivate_channel_services(int channel, idn_chanop_t chanop)
12650 {
12651 	idn_chansvr_t	*csp;
12652 	int		cs_count;
12653 	int		c;
12654 	idn_chanset_t	chanset;
12655 	ksema_t		*central_morguep = NULL;
12656 	procname_t	proc = "idn_deactivate_channel_services";
12657 
12658 
12659 	ASSERT(idn.chan_servers);
12660 
12661 	PR_CHAN("%s: deactivating channel %d services\n", proc, channel);
12662 
12663 	/*
12664 	 * XXX
12665 	 * Old code allowed us to deactivate multiple channel
12666 	 * servers at once.  Keep for now just in case.
12667 	 */
12668 	chanset = CHANSET(channel);
12669 
12670 	/*
12671 	 * Point all the data dispatchers to the same morgue
12672 	 * so we can kill them all at once.
12673 	 */
12674 	cs_count = 0;
12675 	for (c = 0; c < IDN_MAX_NETS; c++) {
12676 		if (!CHAN_IN_SET(chanset, c))
12677 			continue;
12678 
12679 		csp = &idn.chan_servers[c];
12680 		ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
12681 		ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
12682 
12683 		if (csp->ch_recv_threadp == NULL) {
12684 			/*
12685 			 * No channel server home.
12686 			 * But we're still holding the c_mutex.
12687 			 * At least mark him idle in case we start him up.
12688 			 */
12689 			PR_CHAN("%s: no channel server found for chan %d\n",
12690 				proc, c);
12691 			IDN_CHAN_UNLOCK_LOCAL(csp);
12692 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12693 			continue;
12694 		}
12695 		ASSERT(csp->ch_id == (uchar_t)c);
12696 
12697 		/*
12698 		 * Okay, now we've blocked the send and receive sides.
12699 		 */
12700 
12701 		if ((chanop == IDNCHAN_SOFT_CLOSE) ||
12702 		    (chanop == IDNCHAN_OFFLINE)) {
12703 			/*
12704 			 * We turned off the ACTIVE flag, but there's
12705 			 * no guarantee he stopped because of it.  He may
12706 			 * have already been sleeping.  We need to be
12707 			 * sure he recognizes the IDLE, so we need to
12708 			 * signal him and give him a chance to see it.
12709 			 */
12710 			cv_signal(&csp->ch_recv_cv);
12711 			IDN_CHAN_UNLOCK_LOCAL(csp);
12712 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12713 			cs_count++;
12714 			continue;
12715 		}
12716 
12717 		PR_CHAN("%s: pointing chansvr %d to morgue (0x%p)\n",
12718 			proc, c, central_morguep ? central_morguep
12719 						: csp->ch_recv_morguep);
12720 
12721 		if (central_morguep == NULL) {
12722 			central_morguep = csp->ch_recv_morguep;
12723 		} else {
12724 			sema_destroy(csp->ch_recv_morguep);
12725 			FREESTRUCT(csp->ch_recv_morguep, ksema_t, 1);
12726 
12727 			csp->ch_recv_morguep = central_morguep;
12728 		}
12729 		cv_signal(&csp->ch_recv_cv);
12730 		if (csp->ch_recv.c_waiters > 0)
12731 			cv_broadcast(&csp->ch_recv.c_cv);
12732 		/*
12733 		 * Save any existing binding for next reincarnation.
12734 		 * Note that we're holding the local and global
12735 		 * locks so we're protected against other touchers
12736 		 * of the ch_bound_cpuid fields.
12737 		 */
12738 		csp->ch_bound_cpuid_pending = csp->ch_bound_cpuid;
12739 		csp->ch_bound_cpuid = -1;
12740 		IDN_CHAN_UNLOCK_LOCAL(csp);
12741 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12742 		cs_count++;
12743 	}
12744 	PR_CHAN("%s: signaled %d chansvrs for chanset 0x%x\n",
12745 		proc, cs_count, chanset);
12746 
12747 	if ((chanop == IDNCHAN_SOFT_CLOSE) || (chanop == IDNCHAN_OFFLINE))
12748 		return (cs_count);
12749 
12750 	PR_CHAN("%s: waiting for %d (chnset=0x%x) chan svrs to term\n",
12751 		proc, cs_count, chanset);
12752 	PR_CHAN("%s: morguep = 0x%p\n", proc, central_morguep);
12753 
12754 	ASSERT((cs_count > 0) ? (central_morguep != NULL) : 1);
12755 	while (cs_count-- > 0)
12756 		sema_p(central_morguep);
12757 
12758 	if (central_morguep) {
12759 		sema_destroy(central_morguep);
12760 		FREESTRUCT(central_morguep, ksema_t, 1);
12761 	}
12762 
12763 	return (cs_count);
12764 }
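
/*
 * A minimal sketch of the central-morgue pattern used above: every
 * dying dispatcher is pointed at one semaphore and posts it as its
 * last act, so the caller simply takes the semaphore once per thread
 * it signaled.  The ex_* names are hypothetical; in the driver the
 * matching sema_v() happens in the channel server's own exit path.
 */
static void
ex_dispatcher_exit(ksema_t *morguep)
{
	sema_v(morguep);	/* one post per dead dispatcher */
}

static void
ex_reap(ksema_t *morguep, int nthreads)
{
	while (nthreads-- > 0)
		sema_p(morguep);	/* wait for each post to arrive */
	sema_destroy(morguep);
}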
12765 
12766 int
12767 idn_chanservers_init()
12768 {
12769 	int		c;
12770 	idn_chansvr_t	*csp;
12771 
12772 
12773 	if (idn.chan_servers)
12774 		return (0);
12775 
12776 	idn.chan_servers = GETSTRUCT(idn_chansvr_t, IDN_MAXMAX_NETS);
12777 
12778 	for (c = 0; c < IDN_MAXMAX_NETS; c++) {
12779 		csp = &idn.chan_servers[c];
12780 		mutex_init(&csp->ch_send.c_mutex, NULL, MUTEX_DEFAULT, NULL);
12781 		mutex_init(&csp->ch_recv.c_mutex, NULL, MUTEX_DEFAULT, NULL);
12782 		cv_init(&csp->ch_send.c_cv, NULL, CV_DRIVER, NULL);
12783 		cv_init(&csp->ch_recv.c_cv, NULL, CV_DRIVER, NULL);
12784 		cv_init(&csp->ch_recv_cv, NULL, CV_DRIVER, NULL);
12785 		csp->ch_bound_cpuid = -1;
12786 		csp->ch_bound_cpuid_pending = -1;
12787 	}
12788 
12789 	return (c);
12790 }
12791 
12792 void
12793 idn_chanservers_deinit()
12794 {
12795 	int		c;
12796 	idn_chansvr_t	*csp;
12797 
12798 
12799 	if (idn.chan_servers == NULL)
12800 		return;
12801 
12802 	for (c = 0; c < IDN_MAXMAX_NETS; c++) {
12803 		csp = &idn.chan_servers[c];
12804 
12805 		mutex_destroy(&csp->ch_send.c_mutex);
12806 		mutex_destroy(&csp->ch_recv.c_mutex);
12807 		cv_destroy(&csp->ch_send.c_cv);
12808 		cv_destroy(&csp->ch_recv.c_cv);
12809 		cv_destroy(&csp->ch_recv_cv);
12810 	}
12811 
12812 	FREESTRUCT(idn.chan_servers, idn_chansvr_t, IDN_MAXMAX_NETS);
12813 	idn.chan_servers = NULL;
12814 }
12815 
12816 static void
12817 idn_exec_chanactivate(void *chn)
12818 {
12819 	int		not_active, channel;
12820 	idn_chansvr_t	*csp;
12821 
12822 	channel = (int)(uintptr_t)chn;
12823 
12824 	IDN_GLOCK_SHARED();
12825 	if (idn.chan_servers == NULL) {
12826 		IDN_GUNLOCK();
12827 		return;
12828 	}
12829 	csp = &idn.chan_servers[channel];
12830 
12831 	if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
12832 		/*
12833 		 * If we can't grab the global lock, then
12834 		 * something is up, skip out.
12835 		 */
12836 		IDN_GUNLOCK();
12837 		return;
12838 	}
12839 	IDN_GUNLOCK();
12840 
12841 	if (IDN_CHANNEL_IS_PENDING(csp) && lock_try(&csp->ch_actvlck)) {
12842 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12843 		not_active = idn_activate_channel(CHANSET(channel),
12844 							IDNCHAN_OPEN);
12845 		if (not_active)
12846 			lock_clear(&csp->ch_actvlck);
12847 	} else {
12848 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12849 	}
12850 }
12851 
12852 /*
12853  * Delayed activation of a channel.  We don't want to do this within
12854  * idn_signal_data_server() since that's called within the context
12855  * of an XDC handler, so we submit it as a timeout() call that fires
12856  * as soon as possible.
12857  * The ch_initlck & ch_actvlck are used to synchronize activation
12858  * of the channel so that we don't have multiple idn_activate_channel()
12859  * calls attempting to activate the same channel.
12860  */
12861 static void
12862 idn_submit_chanactivate_job(int channel)
12863 {
12864 	idn_chansvr_t	*csp;
12865 
12866 	if (idn.chan_servers == NULL)
12867 		return;
12868 	csp = &idn.chan_servers[channel];
12869 
12870 	if (lock_try(&csp->ch_initlck) == 0)
12871 		return;
12872 
12873 	(void) timeout(idn_exec_chanactivate, (caddr_t)(uintptr_t)channel, 1);
12874 }
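
/*
 * A minimal sketch of the two-latch handoff described above.  The
 * ex_* names are hypothetical and ex_activate() is a stand-in for the
 * real activation work; lock_try()/lock_clear() behave as elsewhere in
 * this file, with lock_try() succeeding for only one caller at a time.
 * ex_submit() queues at most one job; ex_fire() lets only one attempt
 * proceed at a time and drops its latch if the attempt fails, while on
 * success both latches stay held until the deactivation path clears
 * them (as idn_deactivate_channel() does).
 */
static lock_t	ex_initlck;		/* a job has been queued */
static lock_t	ex_actvlck;		/* activation is in progress */

static int
ex_activate(int channel)		/* stand-in for the real work */
{
	return ((channel >= 0) ? 0 : -1);
}

static void
ex_fire(void *arg)
{
	int	channel = (int)(uintptr_t)arg;

	if (!lock_try(&ex_actvlck))
		return;			/* somebody else is activating */

	if (ex_activate(channel) != 0)
		lock_clear(&ex_actvlck);	/* failed - drop the latch */
}

static void
ex_submit(int channel)
{
	if (lock_try(&ex_initlck))
		(void) timeout(ex_fire, (void *)(uintptr_t)channel, 1);
}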
12875 
12876 /*ARGSUSED0*/
12877 static void
12878 idn_xmit_monitor(void *unused)
12879 {
12880 	int		c, d;
12881 	idn_chansvr_t	*csp;
12882 	idn_chanset_t	wake_set;
12883 	domainset_t	conset;
12884 	smr_slab_t	*sp;
12885 	procname_t	proc = "idn_xmit_monitor";
12886 
12887 	CHANSET_ZERO(wake_set);
12888 
12889 	mutex_enter(&idn.xmit_lock);
12890 	if ((idn.xmit_tid == NULL) || !idn.xmit_chanset_wanted) {
12891 		idn.xmit_tid = NULL;
12892 		mutex_exit(&idn.xmit_lock);
12893 		PR_XMON("%s: bailing out\n", proc);
12894 		return;
12895 	}
12896 
12897 	/*
12898 	 * No point in transmitting unless state
12899 	 * is ONLINE.
12900 	 */
12901 	if (idn.state != IDNGS_ONLINE)
12902 		goto retry;
12903 
12904 	conset = idn.domset.ds_connected;
12905 
12906 	/*
12907 	 * Try to reclaim some buffers if possible.
12908 	 */
12909 	for (d = 0; d < MAX_DOMAINS; d++) {
12910 		if (!DOMAIN_IN_SET(conset, d))
12911 			continue;
12912 
12913 		if (!IDN_DLOCK_TRY_SHARED(d))
12914 			continue;
12915 
12916 		if (idn_domain[d].dcpu != IDN_NIL_DCPU)
12917 			(void) idn_reclaim_mboxdata(d, 0, -1);
12918 
12919 		IDN_DUNLOCK(d);
12920 	}
12921 
12922 	/*
12923 	 * Now check if we were successful in getting
12924 	 * any buffers.
12925 	 */
12926 	DSLAB_LOCK_SHARED(idn.localid);
12927 	sp = idn_domain[idn.localid].dslab;
12928 	for (; sp; sp = sp->sl_next)
12929 		if (sp->sl_free)
12930 			break;
12931 	DSLAB_UNLOCK(idn.localid);
12932 
12933 	/*
12934 	 * If there are no buffers available,
12935 	 * no point in reenabling the queues.
12936 	 */
12937 	if (sp == NULL)
12938 		goto retry;
12939 
12940 	CHANSET_ZERO(wake_set);
12941 	for (c = 0; c < IDN_MAX_NETS; c++) {
12942 		int		pending_bits;
12943 		struct idn	*sip;
12944 
12945 		if (!CHAN_IN_SET(idn.xmit_chanset_wanted, c))
12946 			continue;
12947 
12948 		csp = &idn.chan_servers[c];
12949 		if (!IDN_CHAN_TRYLOCK_GLOBAL(csp))
12950 			continue;
12951 
12952 		pending_bits = csp->ch_state &
12953 				IDN_CHANSVC_PENDING_BITS;
12954 
12955 		sip = IDN_INST2SIP(c);
12956 
12957 		if (!csp->ch_send.c_checkin &&
12958 			(pending_bits == IDN_CHANSVC_PENDING_BITS) &&
12959 			sip && (sip->si_flags & IDNRUNNING)) {
12960 
12961 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12962 			CHANSET_ADD(wake_set, c);
12963 
12964 			PR_XMON("%s: QENABLE for channel %d\n",
12965 				proc, c);
12966 
12967 			rw_enter(&idn.struprwlock, RW_READER);
12968 			mutex_enter(&idn.sipwenlock);
12969 			idndl_wenable(sip);
12970 			mutex_exit(&idn.sipwenlock);
12971 			rw_exit(&idn.struprwlock);
12972 		} else {
12973 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12974 		}
12975 	}
12976 
12977 	/*
12978 	 * Clear the channels we enabled.
12979 	 */
12980 	idn.xmit_chanset_wanted &= ~wake_set;
12981 
12982 retry:
12983 
12984 	if (idn.xmit_chanset_wanted == 0)
12985 		idn.xmit_tid = NULL;
12986 	else
12987 		idn.xmit_tid = timeout(idn_xmit_monitor, NULL,
12988 					idn_xmit_monitor_freq);
12989 
12990 	mutex_exit(&idn.xmit_lock);
12991 }
12992 
12993 void
12994 idn_xmit_monitor_kickoff(int chan_wanted)
12995 {
12996 	procname_t	proc = "idn_xmit_monitor_kickoff";
12997 
12998 	mutex_enter(&idn.xmit_lock);
12999 
13000 	if (chan_wanted < 0) {
13001 		/*
13002 		 * Wants all channels.
13003 		 */
13004 		idn.xmit_chanset_wanted = CHANSET_ALL;
13005 	} else {
13006 		CHANSET_ADD(idn.xmit_chanset_wanted, chan_wanted);
13007 	}
13008 
13009 	if (idn.xmit_tid != (timeout_id_t)NULL) {
13010 		/*
13011 		 * A monitor is already running, so
13012 		 * he will catch the new "wants" when
13013 		 * he comes around.
13014 		 */
13015 		mutex_exit(&idn.xmit_lock);
13016 		return;
13017 	}
13018 
13019 	PR_XMON("%s: xmit_mon kicked OFF (chanset = 0x%x)\n",
13020 		proc, idn.xmit_chanset_wanted);
13021 
13022 	idn.xmit_tid = timeout(idn_xmit_monitor, NULL, idn_xmit_monitor_freq);
13023 
13024 	mutex_exit(&idn.xmit_lock);
13025 }
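
/*
 * A distilled sketch of the kickoff/monitor arrangement above, using
 * hypothetical ex_* names.  Callers only accumulate "wants" under the
 * lock; at most one timeout is ever outstanding (tracked by the id),
 * and the handler reschedules itself only while unsatisfied wants
 * remain.  ex_service() is a stand-in for the real reclaim/qenable
 * work and returns the subset of wants it satisfied.
 */
static kmutex_t		ex_lock;
static timeout_id_t	ex_tid;
static uint_t		ex_wanted;

static uint_t
ex_service(uint_t wanted)
{
	return (wanted);		/* stand-in: claim everything */
}

/*ARGSUSED*/
static void
ex_monitor(void *arg)
{
	mutex_enter(&ex_lock);
	ex_wanted &= ~ex_service(ex_wanted);
	/* reschedule only while somebody still wants service */
	ex_tid = ex_wanted ?
	    timeout(ex_monitor, NULL, idn_xmit_monitor_freq) : NULL;
	mutex_exit(&ex_lock);
}

static void
ex_kickoff(uint_t want)
{
	mutex_enter(&ex_lock);
	ex_wanted |= want;		/* accumulate the request */
	if (ex_tid == (timeout_id_t)NULL)
		ex_tid = timeout(ex_monitor, NULL, idn_xmit_monitor_freq);
	mutex_exit(&ex_lock);
}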
13026