xref: /titanic_52/usr/src/uts/sun4u/starfire/io/idn_proto.c (revision 55f5292c612446ce6f93ddd248c0019b5974618b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Inter-Domain Network
28  *
29  * IDN Protocol functions to support domain link/unlink/reconfig.
30  */
31 
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/machparam.h>
35 #include <sys/debug.h>
36 #include <sys/cpuvar.h>
37 #include <sys/kmem.h>
38 #include <sys/mutex.h>
39 #include <sys/rwlock.h>
40 #include <sys/systm.h>
41 #include <sys/stream.h>
42 #include <sys/strsun.h>
43 #include <sys/stropts.h>
44 #include <sys/sema_impl.h>
45 #include <sys/membar.h>
46 #include <sys/utsname.h>
47 #include <inet/common.h>
48 #include <inet/mi.h>
49 #include <netinet/ip6.h>
50 #include <inet/ip.h>
51 #include <netinet/in.h>
52 #include <sys/vm_machparam.h>
53 #include <sys/x_call.h>
54 #include <sys/ddi.h>
55 #include <sys/sunddi.h>
56 #include <sys/atomic.h>
57 #include <vm/as.h>		/* kas decl */
58 
59 #include <sys/idn.h>
60 #include <sys/idn_xf.h>
61 
/*
 * NOTE(review): workaround flag consumed elsewhere in the driver; its
 * exact semantics are not visible in this chunk -- confirm before
 * changing or removing.
 */
#define	IDNBUG_CPUPERBOARD

extern pri_t		maxclsyspri;	/* kernel global: max sys-class pri */
extern u_longlong_t	gettick();	/* hardware tick counter */

/* Transmit-monitor timeout interval (presumably in ticks -- confirm). */
clock_t	idn_xmit_monitor_freq = 50;
68 
69 static int	idn_connect(int domid);
70 static int	idn_disconnect(int domid, idn_fin_t fintype,
71 				idn_finarg_t finarg, idn_finsync_t finsync);
72 static void	idn_deconfig(int domid);
73 static void	idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
74 				idn_finarg_t finarg, idn_finopt_t finopt,
75 				boardset_t idnset);
76 static void	idn_retry_execute(void *arg);
77 static void	idn_retry_submit(void (*func)(uint_t token, void *arg),
78 				void *arg, uint_t token, clock_t ticks);
79 static void	idn_shutdown_datapath(domainset_t domset, int force);
80 static mblk_t	*idn_fill_buffer(caddr_t bufp, int size, mblk_t *mp,
81 				uchar_t **data_rptrp);
82 static ushort_t	idn_cksum(register ushort_t *hdrp, register int count);
83 static int	idn_mark_awol(int domid, clock_t *atime);
84 
85 static void	idn_recv_proto(idn_protomsg_t *hp);
86 static void	idn_send_config(int domid, int phase);
87 static void	idn_recv_config(int domid, idn_msgtype_t *mtp,
88 				idn_xdcargs_t xargs);
89 static int	idn_send_master_config(int domid, int phase);
90 static int	idn_send_slave_config(int domid, int phase);
91 static uint_t	idn_check_master_config(int domid, uint_t *exp, uint_t *act);
92 static uint_t	idn_check_slave_config(int domid, uint_t *exp, uint_t *act);
93 static int	idn_recv_config_done(int domid);
94 static void	idn_nego_cleanup_check(int domid, int new_masterid,
95 				int new_cpuid);
96 static void	idn_recv_cmd(int domid, idn_msgtype_t *mtp,
97 				idn_xdcargs_t xargs);
98 static int	idn_recv_data(int domid, idn_msgtype_t *mtp,
99 				idn_xdcargs_t xargs);
100 static int	idn_send_data_loopback(idn_netaddr_t dst_netaddr,
101 				queue_t *wq, mblk_t *mp);
102 static void	idn_send_dataresp(int domid, idn_nack_t nacktype);
103 static int	idn_send_mboxdata(int domid, struct idn *sip, int channel,
104 				caddr_t bufp);
105 static int	idn_recv_mboxdata(int channel, caddr_t bufp);
106 static int	idn_program_hardware(int domid);
107 static int	idn_deprogram_hardware(int domid);
108 
109 static void	idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp,
110 			idn_cmd_t cmdtype, idn_nack_t nacktype);
111 static void	idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1,
112 				uint_t arg2, uint_t arg3);
113 static void	idn_terminate_cmd(int domid, int serrno);
114 static void	idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls);
115 static void	idn_mainmbox_activate(int domid);
116 static void	idn_mainmbox_deactivate(ushort_t domset);
117 static void	idn_mainmbox_chan_register(int domid,
118 				idn_mainmbox_t *send_mmp,
119 				idn_mainmbox_t *recv_mmp, int channel);
120 static int	idn_mainmbox_chan_unregister(ushort_t domset, int channel);
121 static int	idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp);
122 static void	idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp);
123 static int	idn_activate_channel(idn_chanset_t chanset,
124 				idn_chanop_t chanop);
125 static void	idn_deactivate_channel(idn_chanset_t chanset,
126 				idn_chanop_t chanop);
127 static int	idn_deactivate_channel_services(int channel,
128 				idn_chanop_t chanop);
129 static int	idn_activate_channel_services(int channel);
130 static void	idn_chan_server(idn_chansvr_t **cspp);
131 #if 0
132 static void	idn_chan_flush(idn_chansvr_t *csp);
133 #endif /* 0 */
134 static void	idn_chan_action(int channel, idn_chanaction_t chanaction,
135 				int wait);
136 static void	idn_chan_addmbox(int channel, ushort_t domset);
137 static void	idn_chan_delmbox(int channel, ushort_t domset);
138 static void	idn_submit_chanactivate_job(int channel);
139 static void	idn_exec_chanactivate(void *chn);
140 
141 static void	idn_link_established(void *arg);
142 static void	idn_prealloc_slab(int nslabs);
143 static void	idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp,
144 				uint_t slab_size);
145 static void	idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp,
146 				uint_t slab_offset, uint_t slab_size,
147 				int serrno);
148 static void	idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
149 				uint_t slab_size, int serrno);
150 static void	idn_recv_slabreap_req(int domid, idn_msgtype_t *mtp,
151 				int nslabs);
152 static void	idn_recv_slabreap_resp(int domid, int nslabs, int serrno);
153 static void	idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp,
154 				int nslabs, int serrno);
155 static void	idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
156 				smr_offset_t slab_offset, uint_t slab_size);
157 static void	idn_recv_slabfree_resp(int domid, uint_t slab_offset,
158 				uint_t slab_size, int serrno);
159 static void	idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp,
160 				uint_t slab_offset, uint_t slab_size,
161 				int serrno);
162 static void	idn_retry_nodename_req(void *arg);
163 static void	idn_send_nodename_req(int domid);
164 static void	idn_send_nodename_resp(int domid, idn_msgtype_t *mtp,
165 				uint_t bufoffset, int serrno);
166 static void	idn_recv_nodename_req(int domid, idn_msgtype_t *mtp,
167 				uint_t bufoffset);
168 static void	idn_recv_nodename_resp(int domid, uint_t bufoffset,
169 				int serrno);
170 
171 static void	idn_protocol_server(int *id);
172 static void	idn_protocol_server_killall();
173 static void	idn_protojob_free(idn_protojob_t *jp);
174 
175 static int	idn_xstate_transfunc(int domid, void *transarg);
176 static int	idn_xphase_transition(int domid, idn_msgtype_t *mtp,
177 				idn_xdcargs_t xargs);
178 static void	idn_sync_enter(int domid, idn_synccmd_t cmd,
179 				domainset_t xset, domainset_t rset,
180 				int (*transfunc)(), void *transarg);
181 static domainset_t
182 		idn_sync_register(int domid, idn_synccmd_t cmd,
183 				domainset_t ready_set, idn_syncreg_t regtype);
184 static void	idn_sync_register_awol(int domid);
185 static int	idn_verify_config_mbox(int domid);
186 static int	idn_select_master(int domid, int rmasterid, int rcpuid);
187 
188 static int	valid_mtu(uint_t mtu);
189 static int	valid_bufsize(uint_t bufsize);
190 static int	valid_slabsize(int slabsize);
191 static int	valid_nwrsize(int nwrsize);
192 
193 static int	idn_master_init();
194 static void	idn_master_deinit();
195 
196 static void	idn_send_acknack(int domid, idn_msgtype_t *mtp,
197 				idn_xdcargs_t xargs);
198 
199 static int	idn_send_nego(int domid, idn_msgtype_t *mtp,
200 				domainset_t conset);
201 static void	idn_retry_nego(uint_t token, void *arg);
202 static int	idn_check_nego(int domid, idn_msgtype_t *mtp,
203 				idn_xdcargs_t xargs);
204 static void	idn_action_nego_pend(int domid, idn_msgtype_t *mtp,
205 				idn_xdcargs_t xargs);
206 static void	idn_error_nego(int domid, idn_msgtype_t *mtp,
207 				idn_xdcargs_t xargs);
208 static void	idn_action_nego_sent(int domid, idn_msgtype_t *mtp,
209 				idn_xdcargs_t xargs);
210 static void	idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp,
211 				idn_xdcargs_t xargs);
212 static void	idn_final_nego(int domid);
213 static void	idn_exit_nego(int domid, uint_t msgtype);
214 
215 static int	idn_send_con(int domid, idn_msgtype_t *mtp,
216 				idn_con_t contype, domainset_t conset);
217 static void	idn_retry_con(uint_t token, void *arg);
218 static int	idn_check_con(int domid, idn_msgtype_t *mtp,
219 				idn_xdcargs_t xargs);
220 static void	idn_action_con_pend(int domid, idn_msgtype_t *mtp,
221 				idn_xdcargs_t xargs);
222 static void	idn_error_con(int domid, idn_msgtype_t *mtp,
223 				idn_xdcargs_t xargs);
224 static void	idn_action_con_sent(int domid, idn_msgtype_t *mtp,
225 				idn_xdcargs_t xargs);
226 static void	idn_action_con_rcvd(int domid, idn_msgtype_t *mtp,
227 				idn_xdcargs_t xargs);
228 static void	idn_final_con(int domid);
229 static void	idn_exit_con(int domid, uint_t msgtype);
230 
231 static int	idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype,
232 				idn_finarg_t finarg, idn_finopt_t finopt,
233 				domainset_t finset, uint_t finmaster);
234 static void	idn_retry_fin(uint_t token, void *arg);
235 static int	idn_check_fin_pend(int domid, idn_msgtype_t *mtp,
236 				idn_xdcargs_t xargs);
237 static void	idn_action_fin_pend(int domid, idn_msgtype_t *mtp,
238 				idn_xdcargs_t xargs);
239 static void	idn_error_fin_pend(int domid, idn_msgtype_t *mtp,
240 				idn_xdcargs_t xargs);
241 static int	idn_check_fin_sent(int domid, idn_msgtype_t *mtp,
242 				idn_xdcargs_t xargs);
243 static void	idn_action_fin_sent(int domid, idn_msgtype_t *mtp,
244 				idn_xdcargs_t xargs);
245 static void	idn_error_fin_sent(int domid, idn_msgtype_t *mtp,
246 				idn_xdcargs_t xargs);
247 static void	idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp,
248 				idn_xdcargs_t xargs);
249 static void	idn_final_fin(int domid);
250 static void	idn_exit_fin(int domid, uint_t msgtype);
251 
/*
 * We keep a small cache of protojob structures just
 * in case allocation within idn_handler comes back
 * with nothing from the land of kmem.
 */
idn_protojob_t	idn_protojob_cache[IDN_DMV_PENDING_MAX];	/* backing store */
idn_protojob_t	*idn_protojob_cache_list;	/* head of cached entries */
kmutex_t	idn_protojob_cache_lock;	/* presumably guards the list */
260 
261 /*
262  *	- receive message.
263  *	- call check-function for current state.
264  *	- if (check-function == ok) then
265  *		call action-function for current state.
266  *	  else
267  *		call error-function for current state.
268  *	- transition state based on check results.
269  *	- if (next state == final state) then
270  *		call final-function.
271  */
/*
 * Exchange-phase descriptor for NEGO messages: per-xstate
 * check/action/error handlers plus final/exit callbacks
 * (see the machinery description above; installed into dp->dxp
 * by idn_connect() and driven via idn_xphase_transition()).
 */
static idn_xphase_t xphase_nego = {
	IDNP_NEGO,
	{
		{ IDNDS_NEGO_PEND,
			idn_check_nego,
			idn_action_nego_pend,
			idn_error_nego},
		{ IDNDS_NEGO_SENT,
			idn_check_nego,
			idn_action_nego_sent,
			idn_error_nego},
		{ IDNDS_NEGO_RCVD,
			NULL,
			idn_action_nego_rcvd,
			NULL },
		{ IDNDS_CONFIG, NULL, NULL, NULL },
	},
	idn_final_nego,
	idn_exit_nego
};
292 
/*
 * Exchange-phase descriptor for CON (connect) messages; same
 * check/action/error/final layout as xphase_nego above.
 */
static idn_xphase_t xphase_con = {
	IDNP_CON,
	{
		{ IDNDS_CON_PEND,
			idn_check_con,
			idn_action_con_pend,
			idn_error_con},
		{ IDNDS_CON_SENT,
			idn_check_con,
			idn_action_con_sent,
			idn_error_con},
		{ IDNDS_CON_RCVD,
			NULL,
			idn_action_con_rcvd,
			NULL },
		{ IDNDS_CON_READY, NULL, NULL, NULL },
	},
	idn_final_con,
	idn_exit_con
};
313 
/*
 * Exchange-phase descriptor for FIN (disconnect) messages;
 * installed into dp->dxp by idn_disconnect().
 */
static idn_xphase_t xphase_fin = {
	IDNP_FIN,
	{
		{ IDNDS_FIN_PEND,
			idn_check_fin_pend,
			idn_action_fin_pend,
			idn_error_fin_pend },
		{ IDNDS_FIN_SENT,
			idn_check_fin_sent,
			idn_action_fin_sent,
			idn_error_fin_sent },
		{ IDNDS_FIN_RCVD,
			NULL,
			idn_action_fin_rcvd,
			NULL },
		{ IDNDS_DMAP, NULL, NULL, NULL },
	},
	idn_final_fin,
	idn_exit_fin
};
334 
/*
 * Exchange-state transition table, consulted by idn_next_xstate() as
 * idnxs_state_table[current xstate][message class][err], where the
 * message-class index is:
 *	0 = no message, 1 = msg only, 2 = msg+ack, 3 = ack, 4 = nack
 * and within each pair [0] is the next state on success, [1] the next
 * state on error.  IDNXS_NIL marks an invalid state/message combination.
 */
static int idnxs_state_table[4][5][2] = {
	{			/* IDNXS_PEND */
		{ IDNXS_SENT,	IDNXS_PEND },	/* 0 */
		{ IDNXS_RCVD,	IDNXS_PEND },	/* msg */
		{ IDNXS_NIL,	IDNXS_PEND },	/* msg+ack */
		{ IDNXS_PEND,	IDNXS_NIL },	/* ack */
		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
	},
	{			/* IDNXS_SENT */
		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
		{ IDNXS_RCVD,	IDNXS_PEND },	/* msg */
		{ IDNXS_FINAL,	IDNXS_PEND },	/* msg+ack */
		{ IDNXS_NIL,	IDNXS_NIL },	/* ack */
		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
	},
	{			/* IDNXS_RCVD */
		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
		{ IDNXS_NIL,	IDNXS_NIL },	/* msg */
		{ IDNXS_FINAL,	IDNXS_NIL },	/* msg+ack */
		{ IDNXS_FINAL,	IDNXS_NIL },	/* ack */
		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
	},
	{			/* IDNXS_FINAL */
		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
		{ IDNXS_NIL,	IDNXS_NIL },	/* msg */
		{ IDNXS_NIL,	IDNXS_NIL },	/* msg+ack */
		{ IDNXS_NIL,	IDNXS_NIL },	/* ack */
		{ IDNXS_NIL,	IDNXS_NIL },	/* nack */
	}
};
365 
366 /*
367  * NONE		Respective domain does not have a master.
368  * OTHER	Respective domain has a master different
369  *		than either local or remote.
370  * LOCAL	Respective domain has chosen local as master.
371  * REMOTE	Respective domain has chosen remote as master.
372  *
373  * Actions:
374  *	VOTE		Compare votes and select one.
375  *	VOTE_RCFG	Compare votes and Reconfigure
376  *			if necessary, i.e. remote won.
377  *	CONNECT		Connect to remote's OTHER if different
378  *			than our local master.
379  *	LOCAL		Local domain is winner.
380  *	REMOTE		Remote domain is winner.
381  *	WAIT		Wait for remote to connect to our
382  *			master if his is different.
383  *	ERROR		An impossible condition.
384  *
385  * Index:
386  *	0 = Local
387  *	1 = Remote
388  */
/*
 * Master selection outcome, indexed [local's master view][remote's
 * master view] (NONE/OTHER/LOCAL/REMOTE -- see Actions table above).
 */
static idn_master_select_t master_select_table[4][4] = {
	{				/* local	remote	*/
		MASTER_SELECT_VOTE,	/* NONE		NONE	*/
		MASTER_SELECT_CONNECT,	/* NONE		OTHER	*/
		MASTER_SELECT_LOCAL,	/* NONE		LOCAL	*/
		MASTER_SELECT_REMOTE	/* NONE		REMOTE	*/
	},
	{
		MASTER_SELECT_WAIT,	/* OTHER	NONE	*/
		MASTER_SELECT_CONNECT,	/* OTHER	OTHER	*/
		MASTER_SELECT_WAIT,	/* OTHER	LOCAL	*/
		MASTER_SELECT_WAIT	/* OTHER	REMOTE	*/
	},
	{
		MASTER_SELECT_LOCAL,	/* LOCAL	NONE	*/
		MASTER_SELECT_CONNECT,	/* LOCAL	OTHER	*/
		MASTER_SELECT_LOCAL,	/* LOCAL	LOCAL	*/
		MASTER_SELECT_VOTE_RCFG	/* LOCAL	REMOTE	*/
	},
	{
		MASTER_SELECT_REMOTE,	/* REMOTE	NONE	*/
		MASTER_SELECT_CONNECT,	/* REMOTE	OTHER	*/
		MASTER_SELECT_ERROR,	/* REMOTE	LOCAL	*/
		MASTER_SELECT_REMOTE	/* REMOTE	REMOTE	*/
	}
};
415 
416 void
417 idn_assign_cookie(int domid)
418 {
419 	static ushort_t	num = 0;
420 	ushort_t	cookie;
421 	procname_t	proc = "idn_assign_cookie";
422 
423 	if ((cookie = idn_domain[domid].dcookie_recv) != 0)
424 		return;
425 
426 	cookie = (ushort_t)(((uint64_t)&idn_domain[domid] >> 8) & 0xff);
427 	while ((cookie ^= num++ & 0xff) == 0)
428 		;
429 
430 	PR_PROTO("%s:%d: assigned RECV cookie 0x%x\n", proc, domid, cookie);
431 
432 	idn_domain[domid].dcookie_recv = cookie;
433 }
434 
435 void
436 idn_update_priority(int domid, int pri)
437 {
438 	idn_domain_t	*dp;
439 	procname_t	proc = "idn_update_priority";
440 
441 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
442 
443 	dp = &idn_domain[domid];
444 
445 	if (pri >= IDNVOTE_MINPRI) {
446 		dp->dvote.v.priority = pri & IDNVOTE_PRI_MASK;
447 
448 		PR_PROTO("%s:%d: SETTING PRIORITY to req(%d) "
449 		    "(localpri = 0x%x)\n",
450 		    proc, domid, pri, IDNVOTE_PRIVALUE(dp->dvote));
451 	} else {
452 		PR_PROTO("%s:%d: PRIORITIES UNCHANGED (pri = 0x%x)\n",
453 		    proc, domid, IDNVOTE_PRIVALUE(dp->dvote));
454 	}
455 }
456 
/*
 * Initiate a link between the local domain and the remote domain
 * containing the given cpuid.
 *
 * domid	- ID of the remote domain to link to.
 * cpuid	- a CPU within the remote domain (used to address it).
 * pri		- requested vote priority for the local domain,
 *		  applied via idn_update_priority().
 * waittime	- 0 kicks off the connect asynchronously; > 0 blocks
 *		  up to waittime for the IDNOP_CONNECTED op.
 * sep		- status structure handed to idn_init_op().
 *
 * Returns 0 on success (including already-connected and self-link),
 * EINVAL for bad arguments or inappropriate domain state, EIO if the
 * domain could not be opened, otherwise the idn_wait_op() result.
 */
int
idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
{
	int		rv;
	idn_domain_t	*dp;
	void		*opcookie;
	procname_t	proc = "idn_link";

	/* Validate arguments before taking any locks. */
	if ((cpuid < 0) || (cpuid >= NCPU)) {
		cmn_err(CE_WARN,
		    "IDN: 201: (LINK) invalid CPU ID (%d)", cpuid);
		return (EINVAL);
	}
	if (waittime < 0) {
		cmn_err(CE_WARN,
		    "IDN: 202: (LINK) invalid time-out value (%d)",
		    waittime);
		return (EINVAL);
	}
	if (!VALID_DOMAINID(domid)) {
		cmn_err(CE_WARN,
		    "IDN: 203: (LINK) invalid domain ID (%d)",
		    domid);
		return (EINVAL);
	}
	/* Linking to ourselves is trivially done. */
	if (domid == idn.localid)
		return (0);

	/* Lock order: sync lock before the per-domain lock. */
	IDN_SYNC_LOCK();
	IDN_DLOCK_EXCL(domid);

	dp = &idn_domain[domid];

	/* Only a CLOSED domain can be newly linked. */
	switch (dp->dstate) {
	case IDNDS_CLOSED:
		break;

	case IDNDS_CONNECTED:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "!IDN: domain %d (CPU ID %d) already connected",
		    domid, cpuid);
#endif /* DEBUG */
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return (0);

	default:
		cmn_err(CE_WARN,
		    "IDN: 204: domain %d state (%s) inappropriate",
		    domid, idnds_str[dp->dstate]);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return (EINVAL);
	}

	rv = idn_open_domain(domid, cpuid, 0);
	if (rv != 0) {
		cmn_err(CE_WARN,
		    "IDN: 205: (%s) failed to open-domain(%d,%d)",
		    proc, domid, cpuid);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return (EIO);
	}


	IDN_DLOCK_EXCL(idn.localid);
	idn_update_priority(idn.localid, pri);
	IDN_DUNLOCK(idn.localid);

	/* Register interest in the connect completing before we start it. */
	if (waittime > 0)
		opcookie = idn_init_op(IDNOP_CONNECTED, DOMAINSET(domid), sep);

	/*
	 * Kicks off the (asynchronous) NEGO phase.  Return value is
	 * ignored; a failure here surfaces via the wait/timeout below.
	 */
	idn_connect(domid);

	IDN_DUNLOCK(domid);
	IDN_SYNC_UNLOCK();

	PR_PROTO("%s:%d: ALLOCATED idn_link(%d)\n", proc, domid, cpuid);

	/* Note: rv is 0 here from the successful idn_open_domain(). */
	if (waittime > 0) {
		boardset_t	domset = 0;
		/*
		 * Well we've successfully allocated a domain id,
		 * but the link may not be fully established yet.
		 * Need to wait since it happens asynchronously.
		 */
		PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
		    proc, domid, idnop_str[IDNOP_CONNECTED],
		    DOMAINSET(domid));

		rv = idn_wait_op(opcookie, &domset, waittime);
	}

#ifdef DEBUG
	if (rv == 0) {
		if (waittime > 0) {
			PR_PROTO("%s:%d: connect SUCCEEDED (cpu %d)\n",
			    proc, domid, cpuid);
		} else {
			PR_PROTO("%s:%d: connect KICKED OFF (cpu %d)\n",
			    proc, domid, cpuid);
		}
	} else {
		PR_PROTO("%s:%d: connect FAILED (cpu %d)\n",
		    proc, domid, cpuid);
	}
#endif /* DEBUG */

	return (rv);
}
573 
/*
 * Unlink the given domain from any domain cluster of
 * which it might be a member.  Force indicates that domain
 * should not go AWOL and if it's currently AWOL to close
 * and remove it.
 * IMPORTANT: If the (hard) force flag is set, the caller is
 *	      assumed to GUARANTEE that the given domain will
 *	      not attempt to communicate with the local domain
 *	      in any manner.
 *
 * domid	- domain to unlink; IDN_NIL_DOMID or the local domain
 *		  ID means "unlink everybody" (full disconnect).
 * idnset	- boards known to have IDN linkage to the local domain;
 *		  passed through to idn_unlink_domainset().
 * fintype	- requested fin severity.
 * finopt	- e.g. IDNFIN_OPT_RELINK vs. unlink.
 * waittime	- 0 for asynchronous kick-off; > 0 blocks up to that
 *		  long for the IDNOP_DISCONNECTED op.
 * sep		- status structure for error reporting.
 *
 * Returns 0 on success (or nothing to do), EINVAL for bad arguments,
 * otherwise the idn_wait_op() result.
 */
int
idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
		idn_finopt_t finopt, int waittime, idnsb_error_t *sep)
{
	int		rv = 0;
	domainset_t	domset;
	void		*opcookie;
	procname_t	proc = "idn_unlink";


	/* Validate arguments before taking any locks. */
	if (waittime < 0) {
		cmn_err(CE_WARN,
		    "IDN: 202: (UNLINK) invalid time-out value (%d)",
		    waittime);
		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_WTIME);
		SET_IDNKERR_PARAM0(sep, waittime);
		return (EINVAL);
	}
	if (!VALID_DOMAINID(domid)) {
		cmn_err(CE_WARN,
		    "IDN: 203: (UNLINK) invalid domain ID (%d)",
		    domid);
		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_DOMAIN);
		SET_IDNKERR_PARAM0(sep, domid);
		SET_IDNKERR_PARAM1(sep, -1);
		return (EINVAL);
	}
	if (idn.localid == IDN_NIL_DOMID) {
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "!IDN: %s: local domain not connected to an IDNnet",
		    proc);
#endif /* DEBUG */
		return (0);
	}

	/*
	 * Lock ordering protocols requires that we grab the
	 * global lock _before_ the local domain's lock.
	 * However, non-local domains must have their lock
	 * grabbed _before_ the global lock.
	 */
	IDN_SYNC_LOCK();
	IDN_GLOCK_EXCL();
	domset = idn.domset.ds_trans_on | idn.domset.ds_trans_off;
	/* Nothing to unlink if we're offline with no transitions pending. */
	if ((idn.state == IDNGS_OFFLINE) && !domset) {
#ifdef DEBUG
		cmn_err(CE_WARN,
		    "!IDN: %s: local domain not connected to an IDNnet",
		    proc);
#endif /* DEBUG */
		IDN_GUNLOCK();
		IDN_SYNC_UNLOCK();
		return (0);
	}

	/*
	 * Unlinking the local domain means a full disconnect:
	 * target every domain except ourselves.
	 */
	if ((domid == IDN_NIL_DOMID) || (domid == idn.localid)) {
		domid = idn.localid;
		IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
		domset = DOMAINSET_ALL;
		DOMAINSET_DEL(domset, idn.localid);
	} else {
		domset = DOMAINSET(domid);
	}
	IDN_GUNLOCK();

	/* Register interest in completion before kicking off the unlink. */
	if (waittime > 0)
		opcookie = idn_init_op(IDNOP_DISCONNECTED, domset, sep);

	idn_unlink_domainset(domset, fintype, IDNFIN_ARG_NONE, finopt, idnset);

	IDN_SYNC_UNLOCK();

	if (waittime > 0) {
		/*
		 * Well the unlink has successfully kicked off.
		 * Since process is asynchronous we need to wait
		 * for it to complete.
		 */
		PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
		    proc, domid, idnop_str[IDNOP_DISCONNECTED],
		    domset);

		rv = idn_wait_op(opcookie, &domset, waittime);
	}

	if (rv == 0) {
		if (waittime > 0) {
			PR_PROTO("%s:%d: disconnect SUCCEEDED\n",
			    proc, domid);
		} else {
			PR_PROTO("%s:%d: disconnect KICKED OFF\n",
			    proc, domid);
		}
	} else {
		PR_PROTO("%s:%d: disconnect FAILED\n", proc, domid);
	}

	return (rv);
}
685 
/*
 * Kick off a disconnect (FIN) for every domain in domset.
 *
 * domset	- candidate domains to unlink.
 * fintype	- requested fin severity; upgraded to
 *		  IDNFIN_FORCE_HARD for domains whose boards are
 *		  not in idnset (no known IDN linkage).
 * finarg	- reason argument passed to idn_disconnect().
 * finopt	- IDNFIN_OPT_RELINK marks survivors for relink.
 * idnset	- boards known to have a link to the local domain.
 *
 * Caller must hold the sync lock; grabs/drops the global and
 * per-domain locks internally.
 */
static void
idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
			idn_finarg_t finarg, idn_finopt_t finopt,
			boardset_t idnset)
{
	int		d;
	domainset_t	offset;
	procname_t	proc = "idn_unlink_domainset";

	ASSERT(IDN_SYNC_IS_LOCKED());

	/*
	 * Determine subset for which we have
	 * no active connections.
	 */
	offset = domset & ~(idn.domset.ds_trans_on |
	    idn.domset.ds_connected |
	    idn.domset.ds_trans_off |
	    idn.domset.ds_relink);
	/*
	 * Determine subset that are really candidates.
	 * Note that we include those already down the path
	 * since it's possible a request came in to upgrade
	 * their fintype (e.g. NORMAL->FORCE_SOFT).
	 */
	domset &= ~offset;

	/* Inactive domains are already effectively disconnected. */
	if (offset)
		idn_update_op(IDNOP_DISCONNECTED, offset, NULL);

	IDN_GLOCK_EXCL();
	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
		/*
		 * Don't add domains already transitioning off.
		 * If they caught on an earlier Reconfig wave then
		 * they'll already be in ds_relink anyway.  Otherwise,
		 * once a domain is transition off we can't upgrade
		 * him to a RELINK.
		 */
#ifdef DEBUG
		if (idn.domset.ds_hitlist & domset) {
			PR_HITLIST("%s: domset=%x, hitlist=%x, trans_off=%x "
			    "-> relink = %x -> %x\n",
			    proc, domset, idn.domset.ds_hitlist,
			    idn.domset.ds_relink, idn.domset.ds_trans_off,
			    idn.domset.ds_relink |
			    (domset & ~idn.domset.ds_trans_off));
		}
#endif /* DEBUG */

		domset &= ~idn.domset.ds_trans_off;
		idn.domset.ds_relink |= domset;
	} else {
		idn.domset.ds_relink &= ~domset;
	}
	/*
	 * Update the ds_trans_on/off so we don't waste
	 * time talking to these folks.
	 */
	idn.domset.ds_trans_on  &= ~domset;
	idn.domset.ds_trans_off |= domset;

	if (domset == 0) {
		/* No candidates left; go OFFLINE if everything is idle. */
		if ((idn.domset.ds_trans_on |
		    idn.domset.ds_connected |
		    idn.domset.ds_trans_off |
		    idn.domset.ds_relink) == 0) {
			PR_HITLIST("%s:%x: HITLIST %x -> 0\n",
			    proc, domset, idn.domset.ds_hitlist);
			idn.domset.ds_hitlist = 0;
			IDN_GSTATE_TRANSITION(IDNGS_OFFLINE);
		}
		IDN_GUNLOCK();
		return;
	}
	IDN_GUNLOCK();

	/* Kick off a FIN sequence for each remaining candidate. */
	for (d = 0; d < MAX_DOMAINS; d++) {
		idn_domain_t	*dp;
		idn_fin_t	ftype;

		if (!DOMAIN_IN_SET(domset, d))
			continue;

		dp = &idn_domain[d];
		IDN_DLOCK_EXCL(d);
		IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
		    idn.domset.ds_relink);
		ftype = fintype;
		if ((dp->dcpu != IDN_NIL_DCPU) && dp->dhw.dh_boardset) {
			/*
			 * If domain is not in the IDNSET passed
			 * down then we need to upgrade this to
			 * hard-force in order to prevent possible
			 * system failures (arbstop).  This is simply
			 * extra protection beyond that checked by
			 * the SSP.  IDNSET contains the set of boards
			 * that have a "link" to the local domain,
			 * including the SMD regs.
			 */
			if ((idnset & dp->dhw.dh_boardset) == 0) {
				PR_PROTO("%s:%d: boardset 0x%x "
				    "NOT in IDNSET 0x%x\n",
				    proc, d, dp->dhw.dh_boardset,
				    idnset);
				/* Warn only when actually upgrading. */
				if (ftype != IDNFIN_FORCE_HARD)
					cmn_err(CE_NOTE,
					    "!IDN: 222: no IDN linkage "
					    "found (b=0x%x, i=0x%x) "
					    "upgrading unlink %s to %s",
					    dp->dhw.dh_boardset,
					    idnset, idnfin_str[ftype],
					    idnfin_str[IDNFIN_FORCE_HARD]);

				ftype = IDNFIN_FORCE_HARD;
			} else {
				PR_PROTO("%s:%d: boardset 0x%x "
				    "FOUND in IDNSET 0x%x\n",
				    proc, d, dp->dhw.dh_boardset,
				    idnset);
			}
		}
		idn_disconnect(d, ftype, finarg, IDNDS_SYNC_TYPE(dp));
		IDN_DUNLOCK(d);
	}
}
812 
813 /*
814  * Return w/locks held.
815  */
816 static int
817 idn_connect(int domid)
818 {
819 	idn_xdcargs_t	xargs;
820 	idn_domain_t	*dp = &idn_domain[domid];
821 	procname_t	proc = "idn_connect";
822 
823 	ASSERT(IDN_SYNC_IS_LOCKED());
824 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
825 
826 	ASSERT(dp->dcpu != IDN_NIL_DCPU);
827 
828 	if (dp->dstate != IDNDS_CLOSED) {
829 		if (DOMAIN_IN_SET(idn.domset.ds_trans_on |
830 		    idn.domset.ds_connected, domid)) {
831 			PR_PROTO("%s:%d: already connected or "
832 			    "in-progress\n", proc, domid);
833 		} else {
834 			PR_PROTO("%s:%d: current state (%s) != "
835 			    "CLOSED\n", proc, domid,
836 			    idnds_str[dp->dstate]);
837 		}
838 		return (-1);
839 	}
840 
841 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_connected, domid));
842 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_trans_off, domid));
843 
844 	dp->dxp = &xphase_nego;
845 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
846 
847 	idn_xphase_transition(domid, NULL, xargs);
848 
849 	return (0);
850 }
851 
/*
 * Initiate the FIN (disconnect) sequence for the given domain.
 *
 * fintype	- requested fin severity; an existing fin state is only
 *		  ever upgraded, never downgraded.
 * finarg	- reason argument carried in the FIN xargs.
 * finsync	- recorded in dp->dfin_sync (OFF/NO/YES --
 *		  presumably controls synchronous fin handling;
 *		  confirm against the FIN state machine).
 *
 * Returns 0 if the FIN phase was kicked off, -1 if the domain was
 * already CLOSED.
 * Return w/locks held.
 */
static int
idn_disconnect(int domid, idn_fin_t fintype, idn_finarg_t finarg,
    idn_finsync_t finsync)
{
	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
	uint_t		token;
	uint_t		finmaster;
	idn_xdcargs_t	xargs;
	idn_finopt_t	finopt;
	idn_domain_t	*dp = &idn_domain[domid];
	procname_t	proc = "idn_disconnect";

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	if (dp->dstate == IDNDS_CLOSED) {
		PR_PROTO("%s:%d: already CLOSED\n", proc, domid);
		idn_update_op(IDNOP_DISCONNECTED, DOMAINSET(domid), NULL);
		return (-1);
	}

	/*
	 * Terminate any outstanding commands that were
	 * targeted towards this domain.
	 */
	idn_terminate_cmd(domid, ECANCELED);

	/*
	 * Terminate any and all retries that may have
	 * outstanding for this domain.
	 */
	token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
	(void) idn_retry_terminate(token);

	/*
	 * Stop all outstanding message timers for
	 * this guy.
	 */
	IDN_MSGTIMER_STOP(domid, 0, 0);

	/* Enter the FIN exchange phase in its PENDing state. */
	dp->dxp = &xphase_fin;
	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
	if ((int)dp->dfin < (int)fintype) {
		/*
		 * You can only upgrade a fin type.
		 * We don't allow it to be downgraded
		 * as it's too dangerous since some
		 * state may have been blown away while
		 * we were fin'ing at a higher level.
		 */
		IDN_FSTATE_TRANSITION(dp, fintype);
	}

	dp->dfin_sync = finsync;
	PR_PROTO("%s:%d: disconnect synchronously = %s\n",
	    proc, domid, (finsync == IDNFIN_SYNC_OFF) ? "OFF" :
	    (finsync == IDNFIN_SYNC_NO) ? "NO" : "YES");

	/*
	 * Relink if this domain was marked for it and we're not in a
	 * global disconnect; otherwise put it on the hitlist.
	 */
	IDN_GLOCK_SHARED();
	if (DOMAIN_IN_SET(idn.domset.ds_relink, domid) &&
	    (idn.state != IDNGS_DISCONNECT)) {
		finopt = IDNFIN_OPT_RELINK;
	} else {
		finopt = IDNFIN_OPT_UNLINK;
		PR_HITLIST("%s:%d: HITLIST %x -> %x\n",
		    proc, domid, idn.domset.ds_hitlist,
		    idn.domset.ds_hitlist | DOMAINSET(domid));
		DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
	}

	/* Marshal the FIN parameters into the xargs for the transition. */
	CLR_XARGS(xargs);
	SET_XARGS_FIN_TYPE(xargs, dp->dfin);
	SET_XARGS_FIN_ARG(xargs, finarg);
	SET_XARGS_FIN_OPT(xargs, finopt);
	SET_XARGS_FIN_DOMSET(xargs, 0);		/* unused when msg = 0 */
	new_masterid = IDN_GET_NEW_MASTERID();
	IDN_GUNLOCK();
	if (new_masterid != IDN_NIL_DOMID)
		new_cpuid = idn_domain[new_masterid].dcpu;
	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
	SET_XARGS_FIN_MASTER(xargs, finmaster);

	idn_xphase_transition(domid, NULL, xargs);

	return (0);
}
941 
/*
 * Map the current exchange state and an incoming message to the next
 * exchange state via idnxs_state_table.
 *
 * o_xstate	- current exchange state (row index).
 * err		- 0/non-zero selects the success/error column; the
 *		  special value -1 means "query only": return any
 *		  non-NIL next state if the message is valid in this
 *		  state, IDNXS_NIL if it is not.
 * msg		- message bits; classified into one of five columns
 *		  (none / msg / msg+ack / ack / nack).
 *
 * Returns the next exchange state, or IDNXS_NIL for an invalid
 * message/state combination.
 *
 * NOTE(review): the ASSERT permits o_xstate == 4 while
 * idnxs_state_table only has rows 0-3; if any IDNXS_* value can be 4
 * this indexes out of bounds -- confirm against the idn_xstate_t
 * definition.
 */
static int
idn_next_xstate(idn_xstate_t o_xstate, int err, uint_t msg)
{
	int		index;
	procname_t	proc = "idn_next_xstate";

	ASSERT(((int)o_xstate >= 0) && ((int)o_xstate <= 4));

	/* Classify the message into a table column. */
	if (!msg)
		index = 0;
	else if ((msg & IDNP_MSGTYPE_MASK) == 0)
		index = (msg & IDNP_ACK) ? 3 : (msg & IDNP_NACK) ? 4 : -1;
	else
		index = (msg & IDNP_ACK) ? 2 :
		    !(msg & IDNP_ACKNACK_MASK) ? 1 : -1;

	if (index == -1) {
		STRING(str);

		INUM2STR(msg, str);
		PR_PROTO("%s: (msg = 0x%x(%s))\n", proc, msg, str);
		return (IDNXS_NIL);
	}

	if (err == -1) {
		int	n_xstate;
		/*
		 * Caller is just interested in querying is this
		 * is a valid message to receive in the current
		 * xstate.  A return value of IDNXS_NIL indicates
		 * that it's not.  A return value of non-IDNXS_NIL
		 * indicates it's cool.  An invalid message is
		 * determined by both err & !err states being IDNXS_NIL.
		 */
		n_xstate = idnxs_state_table[(int)o_xstate][index][0];
		if (n_xstate != IDNXS_NIL)
			return (n_xstate);
		else
			return (idnxs_state_table[(int)o_xstate][index][1]);
	} else {
		return (idnxs_state_table[(int)o_xstate][index][err ? 1 : 0]);
	}
}
985 
986 static int
987 idn_select_candidate(domainset_t master_set)
988 {
989 	int		d, best_id = IDN_NIL_DOMID;
990 	uint_t		best_vote = 0;
991 	idn_domain_t	*dp;
992 	procname_t	proc = "idn_select_candidate";
993 
994 	ASSERT(IDN_SYNC_IS_LOCKED());
995 
996 	if (master_set == 0) {
997 		PR_PROTO("%s: %x -> %d\n", proc, master_set, IDN_NIL_DOMID);
998 		return (IDN_NIL_DOMID);
999 	}
1000 
1001 	for (d = 0; d < MAX_DOMAINS; d++) {
1002 		uint_t		vote;
1003 		idn_vote_t	v;
1004 
1005 		if (!DOMAIN_IN_SET(master_set, d))
1006 			continue;
1007 
1008 		dp = &idn_domain[d];
1009 
1010 		if ((dp->domid == IDN_NIL_DOMID) ||
1011 		    (dp->dcpu == IDN_NIL_DCPU) ||
1012 		    ((v.ticket = dp->dvote.ticket) == 0))
1013 			continue;
1014 
1015 		vote = IDNVOTE_ELECT(v);
1016 
1017 		if (vote > best_vote) {
1018 			best_vote = vote;
1019 			best_id = d;
1020 		}
1021 	}
1022 
1023 	PR_PROTO("%s: %x -> %d\n", proc, master_set, best_id);
1024 
1025 	return (best_id);
1026 }
1027 
1028 /*
1029  * If a non-zero value is returned then GLOCK will have been dropped.
1030  * Otherwise, routine returns with all incoming locks still held.
1031  */
1032 static int
1033 idn_select_master(int domid, int rmasterid, int rcpuid)
1034 {
1035 	char		*sel;
1036 	int		lmasterid, masterid;
1037 	int		do_reconfig = 0;
1038 	int		lindex, rindex;
1039 	idn_domain_t	*ldp, *rdp;
1040 	uint_t		rvote, lvote;
1041 	idn_master_select_t	select;
1042 	procname_t	proc = "idn_select_master";
1043 
1044 	ASSERT(IDN_SYNC_IS_LOCKED());
1045 	ASSERT(IDN_GLOCK_IS_EXCL());
1046 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1047 
1048 	PR_PROTO("%s:%d: lmasterid = %d, rmasterid = %d, rcpuid = %d\n",
1049 	    proc, domid, IDN_GET_MASTERID(), rmasterid, rcpuid);
1050 
1051 	IDN_DLOCK_EXCL(idn.localid);
1052 
1053 	ldp = &idn_domain[idn.localid];
1054 	rdp = &idn_domain[domid];
1055 
1056 	/*
1057 	 * Clear master bits since mastership is derived from
1058 	 * other information (local/remote idn.masterid/idn.new_masterid)
1059 	 * and we don't want the vote master bit to confuse matters.
1060 	 */
1061 	lvote = IDNVOTE_ELECT(ldp->dvote);
1062 	rvote = IDNVOTE_ELECT(rdp->dvote);
1063 
1064 	lmasterid = IDN_GET_MASTERID();
1065 
1066 	lindex = (lmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
1067 	    (lmasterid == idn.localid) ? MASTER_IS_LOCAL :
1068 	    (lmasterid == domid) ? MASTER_IS_REMOTE :
1069 	    MASTER_IS_OTHER;
1070 
1071 	rindex = (rmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
1072 	    (rmasterid == domid) ? MASTER_IS_REMOTE :
1073 	    (rmasterid == idn.localid) ? MASTER_IS_LOCAL :
1074 	    MASTER_IS_OTHER;
1075 
1076 	select = master_select_table[lindex][rindex];
1077 
1078 	masterid = IDN_NIL_DOMID;
1079 
1080 	/*
1081 	 * Each case is responsible for dropping DLOCK(localid)
1082 	 * and GLOCK if it doesn't select a master, unless a
1083 	 * reconfig is necessary.
1084 	 */
1085 	switch (select) {
1086 	case MASTER_SELECT_VOTE_RCFG:
1087 		sel = "VOTE_RECONFIG";
1088 		if (lvote > rvote) {
1089 			/*
1090 			 * If the local domain is the winner then remote
1091 			 * domain will have to Reconfig.  We'll continue
1092 			 * through the connection process anyway.  The
1093 			 * remote domains will tell us to back-off while
1094 			 * Reconfigs, but that's okay as we'll keep retrying.
1095 			 */
1096 			masterid = idn.localid;
1097 		} else if (lvote < rvote) {
1098 			do_reconfig = 1;
1099 			/*
1100 			 * GLOCK will get dropped once reconfig
1101 			 * is kicked off.
1102 			 */
1103 		} else {
1104 			cmn_err(CE_WARN,
1105 			    "IDN: 206: cannot link domains "
1106 			    "with equal votes (L(%d),R(%d),0x%x)",
1107 			    idn.localid, domid, rvote);
1108 			IDN_GUNLOCK();
1109 		}
1110 		IDN_DUNLOCK(idn.localid);
1111 		break;
1112 
1113 	case MASTER_SELECT_VOTE:
1114 		sel = "VOTE";
1115 		if (lvote > rvote) {
1116 			masterid = idn.localid;
1117 			ldp->dvote.v.master = 1;
1118 			rdp->dvote.v.master = 0;
1119 		} else if (lvote < rvote) {
1120 			masterid = domid;
1121 			ldp->dvote.v.master = 0;
1122 			rdp->dvote.v.master = 1;
1123 		} else {
1124 			cmn_err(CE_WARN,
1125 			    "IDN: 206: cannot link domains "
1126 			    "with equal votes (L(%d),R(%d),0x%x)",
1127 			    idn.localid, domid, rvote);
1128 		}
1129 		ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
1130 		if (masterid != IDN_NIL_DOMID) {
1131 			IDN_SET_MASTERID(masterid);
1132 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1133 		} else {
1134 			IDN_GUNLOCK();
1135 		}
1136 		IDN_DUNLOCK(idn.localid);
1137 		break;
1138 
1139 	case MASTER_SELECT_REMOTE:
1140 		sel = "REMOTE";
1141 		masterid = domid;
1142 		if (IDN_GET_MASTERID() == IDN_NIL_DOMID) {
1143 			IDN_SET_MASTERID(masterid);
1144 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1145 			ldp->dvote.v.master = 0;
1146 			rdp->dvote.v.master = 1;
1147 		}
1148 		ASSERT(IDN_GET_MASTERID() == domid);
1149 		IDN_DUNLOCK(idn.localid);
1150 		break;
1151 
1152 	case MASTER_SELECT_LOCAL:
1153 		sel = "LOCAL";
1154 		masterid = idn.localid;
1155 		if (IDN_GET_MASTERID() == IDN_NIL_DOMID) {
1156 			IDN_SET_MASTERID(masterid);
1157 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1158 			ldp->dvote.v.master = 1;
1159 			rdp->dvote.v.master = 0;
1160 		}
1161 		ASSERT(IDN_GET_MASTERID() == idn.localid);
1162 		IDN_DUNLOCK(idn.localid);
1163 		break;
1164 
1165 	case MASTER_SELECT_CONNECT:
1166 		sel = "CONNECT";
1167 		if (rmasterid == lmasterid) {
1168 			/*
1169 			 * Local and remote have same master,
1170 			 * let him come onboard.
1171 			 */
1172 			masterid = lmasterid;
1173 			IDN_DUNLOCK(idn.localid);
1174 
1175 		} else {
1176 			int	rv;
1177 
1178 			IDN_DUNLOCK(idn.localid);
1179 			IDN_GUNLOCK();
1180 			IDN_DLOCK_EXCL(rmasterid);
1181 			PR_PROTO("%s:%d: attempting connect w/remote "
1182 			    "master %d\n",
1183 			    proc, domid, rmasterid);
1184 			rv = idn_open_domain(rmasterid, rcpuid, 0);
1185 			if (rv == 0) {
1186 				idn_connect(rmasterid);
1187 			} else if (rv < 0) {
1188 				cmn_err(CE_WARN,
1189 				    "IDN: 205: (%s) failed to "
1190 				    "open-domain(%d,%d)",
1191 				    proc, rmasterid, rcpuid);
1192 			} else {
1193 				/*
1194 				 * Must already have a connection going.
1195 				 */
1196 				PR_PROTO("%s:%d: failed "
1197 				    "idn_open_domain(%d,%d,0) "
1198 				    "(rv = %d)\n",
1199 				    proc, domid, rmasterid,
1200 				    rcpuid, rv);
1201 			}
1202 			IDN_DUNLOCK(rmasterid);
1203 		}
1204 		break;
1205 
1206 	case MASTER_SELECT_WAIT:
1207 		sel = "WAIT";
1208 		/*
1209 		 * If the remote domain has the same master as the local
1210 		 * domain then there's no need to wait.
1211 		 */
1212 		if (rmasterid == lmasterid) {
1213 			masterid = lmasterid;
1214 		} else {
1215 			IDN_GUNLOCK();
1216 		}
1217 		IDN_DUNLOCK(idn.localid);
1218 		break;
1219 
1220 	case MASTER_SELECT_ERROR:
1221 		sel = "ERROR";
1222 		/*
1223 		 * Hit impossible condition.
1224 		 */
1225 		cmn_err(CE_WARN,
1226 		    "IDN: 207: local/remote master-id conflict "
1227 		    "(%d.lmasterid = %d, %d.rmasterid = %d)",
1228 		    idn.localid, lmasterid, domid, rmasterid);
1229 		IDN_GUNLOCK();
1230 		IDN_DUNLOCK(idn.localid);
1231 		break;
1232 
1233 	default:
1234 		cmn_err(CE_WARN,
1235 		    "IDN: 208: %s: unknown case (%d)",
1236 		    proc, (int)select);
1237 		IDN_GUNLOCK();
1238 		IDN_DUNLOCK(idn.localid);
1239 		ASSERT(0);
1240 		break;
1241 	}
1242 
1243 	if (masterid == IDN_NIL_DOMID) {
1244 		PR_PROTO("%s:%d: NO MASTER SELECTED (rmstr=%d) sel=%s\n",
1245 		    proc, domid, rmasterid, sel);
1246 	} else {
1247 		PR_PROTO("%s:%d: MASTER SELECTED = %d (%s)\n",
1248 		    proc, domid, masterid,
1249 		    (masterid == idn.localid) ? "LOCAL" :
1250 		    (masterid == domid) ? "REMOTE" : "OTHER");
1251 	}
1252 
1253 	if (do_reconfig) {
1254 		domainset_t	dis_set;
1255 
1256 		/*
1257 		 * Local domain already has a master.
1258 		 * Need to dismantle all connections
1259 		 * and reestablish one with new master.
1260 		 */
1261 		IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
1262 
1263 		PR_PROTO("%s:%d: RECONFIG new masterid = %d\n",
1264 		    proc, domid, domid);
1265 
1266 		IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
1267 		IDN_SET_NEW_MASTERID(domid);
1268 		IDN_GUNLOCK();
1269 
1270 		dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
1271 		DOMAINSET_DEL(dis_set, domid);
1272 
1273 		idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
1274 		    IDNFIN_OPT_RELINK, BOARDSET_ALL);
1275 	}
1276 
1277 	return ((masterid == IDN_NIL_DOMID) ? -1 : 0);
1278 }
1279 
/*
 * Retry handler for outstanding CON/FIN query messages.  Decodes the
 * retry type from the token, rebuilds the local domain's ready-set
 * for the corresponding synchronization command, then re-sends the
 * appropriate query (CON or FIN) to every domain still expected to
 * participate (query_set) that hasn't already registered.
 */
/*ARGSUSED1*/
static void
idn_retry_query(uint_t token, void *arg)
{
	idn_retry_t	rtype = IDN_RETRY_TOKEN2TYPE(token);
	int		d, domid = IDN_RETRY_TOKEN2DOMID(token);
	idn_domain_t	*dp = &idn_domain[domid];
	idn_synccmd_t	sync_cmd;
	domainset_t	query_set, my_ready_set;
	procname_t	proc = "idn_retry_query";

	IDN_SYNC_LOCK();
	IDN_DLOCK_EXCL(domid);

	switch (rtype) {
	case IDNRETRY_CONQ:
		/*
		 * Connect query: ready domains are those linking or
		 * already linked, excluding any on their way out, plus
		 * ourselves.
		 */
		sync_cmd = IDNSYNC_CONNECT;
		my_ready_set = idn.domset.ds_ready_on | idn.domset.ds_connected;
		my_ready_set &= ~idn.domset.ds_trans_off;
		DOMAINSET_ADD(my_ready_set, idn.localid);
		break;

	case IDNRETRY_FINQ:
		/*
		 * Disconnect query: ready domains are those already
		 * unlinked or never connected.
		 */
		sync_cmd = IDNSYNC_DISCONNECT;
		my_ready_set = idn.domset.ds_ready_off |
		    ~idn.domset.ds_connected;
		break;

	default:
		/* Not a query retry type - nothing to do. */
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}

	/* Fold in whatever this domain has already reported ready. */
	if (dp->dsync.s_cmd == sync_cmd)
		my_ready_set |= dp->dsync.s_set_rdy;

	query_set = idn_sync_register(domid, sync_cmd, 0, IDNSYNC_REG_QUERY);

	PR_PROTO("%s:%d: query_set = 0x%x\n", proc, domid, query_set);

	if (query_set == 0) {
		/* Nobody left to query. */
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}

	for (d = 0; d < MAX_DOMAINS; d++) {
		if (!DOMAIN_IN_SET(query_set, d))
			continue;

		dp = &idn_domain[d];
		if (d != domid)
			IDN_DLOCK_EXCL(d);

		/*
		 * Skip domains already in this sync phase, and for
		 * connect queries skip those we have no send cookie for.
		 */
		if ((dp->dsync.s_cmd == sync_cmd) ||
		    (!dp->dcookie_send &&
		    (rtype == IDNRETRY_CONQ))) {
			if (d != domid)
				IDN_DUNLOCK(d);
			continue;
		}

		IDN_SYNC_QUERY_UPDATE(domid, d);

		if (rtype == IDNRETRY_CONQ)
			idn_send_con(d, NULL, IDNCON_QUERY, my_ready_set);
		else
			idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
			    IDNFIN_OPT_NONE, my_ready_set, NIL_FIN_MASTER);
		if (d != domid)
			IDN_DUNLOCK(d);
	}

	IDN_DUNLOCK(domid);
	IDN_SYNC_UNLOCK();
}
1357 
/*
 * Transmit a NEGO message (optionally nego+ack when replying, per mtp)
 * to domain `domid'.  The message carries the local domain's basic
 * vote ticket, a DSET describing the CPUs of the domains in `conset'
 * the remote side should know about, and a master "hint".  Always
 * returns 0.
 */
static int
idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
{
	idn_domain_t	*ldp, *dp;
	int		d, masterid;
	uint_t		dmask;
	uint_t		acknack;
	uint_t		ticket;
	idnneg_dset_t	dset;
	idn_msgtype_t	mt;
	procname_t	proc = "idn_send_nego";

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	if (mtp) {
		/* Replying to a received message - echo its cookie. */
		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
		mt.mt_mtype = mtp->mt_mtype;
		mt.mt_atype = mtp->mt_atype;
		mt.mt_cookie = mtp->mt_cookie;
	} else {
		/* Locally initiated NEGO. */
		acknack = 0;
		mt.mt_mtype = IDNP_NEGO;
		mt.mt_atype = 0;
		mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
	}

	IDN_GLOCK_SHARED();

	dp = &idn_domain[domid];
	ldp = &idn_domain[idn.localid];

	if ((idn.state == IDNGS_RECONFIG) ||
	    ((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
		masterid = IDN_GET_NEW_MASTERID();
		if ((masterid == idn.localid) || (masterid == domid)) {
			/*
			 * We only send the new-master "hint" to
			 * "other" domains.  If the new-master is
			 * ourself or we're talking to the new-master
			 * then we need to be accurate about our
			 * real master so that the correct master
			 * is selected.
			 */
			masterid = IDN_NIL_DOMID;
		}
	}

	DOMAINSET_DEL(conset, idn.localid);
	DOMAINSET_DEL(conset, domid);
	/*
	 * Exclude domains from conset that are on
	 * remote domain's hitlist.  He's not interested
	 * in hearing about them.  SSP is probably requesting
	 * such domains be unlinked - will eventually get to
	 * local domain.
	 */
	conset &= ~idn.domset.ds_hitlist;
	if ((masterid != IDN_NIL_DOMID) &&
	    DOMAIN_IN_SET(idn.domset.ds_hitlist, masterid)) {
		PR_PROTO("%s:%d: masterid(%d) on hitlist(0x%x) -> -1\n",
		    proc, domid, masterid, idn.domset.ds_hitlist);
		/*
		 * Yikes, our chosen master is on the hitlist!
		 */
		masterid = IDN_NIL_DOMID;
	}

	/* Pack each candidate domain's CPU ID into the DSET. */
	dmask = IDNNEG_DSET_MYMASK();
	IDNNEG_DSET_INIT(dset, dmask);
	for (d = 0; d < MAX_DOMAINS; d++) {
		int	cpuid;

		if (!DOMAIN_IN_SET(conset, d))
			continue;

		if ((cpuid = idn_domain[d].dcpu) == IDN_NIL_DCPU) {
			ASSERT(d != masterid);
			continue;
		}

		IDNNEG_DSET_SET(dset, d, cpuid, dmask);
	}
	IDNNEG_DSET_SET_MASTER(dset, domid, masterid);
	ASSERT((masterid != IDN_NIL_DOMID) ?
	    (idn_domain[masterid].dcpu != IDN_NIL_DCPU) : 1);
	IDN_GUNLOCK();

	IDN_DLOCK_SHARED(idn.localid);
	ticket = IDNVOTE_BASICS(ldp->dvote);
	/*
	 * We just want to send basic vote components without an
	 * indication of mastership (master bit) since that's primarily
	 * for local domain's usage.  There is more correct master
	 * indications in the DSET.  Recall that if we were in a
	 * Reconfig we would have transmitted the "new_masterid"
	 * which might conflict with the local domain's vote.v.master
	 * bit if he was originally the master prior to the Reconfig.
	 */

	PR_PROTO("%s:%d: sending nego%sto (cpu %d) "
	    "[v=0x%x, cs=0x%x, mstr=%d]\n",
	    proc, domid,
	    (acknack & IDNP_ACK) ? "+ack " :
	    (acknack & IDNP_NACK) ? "+nack " : " ",
	    dp->dcpu, ticket, conset, masterid);

	/* Arm the response timer before actually transmitting. */
	IDN_MSGTIMER_START(domid, IDNP_NEGO, 0,
	    idn_msg_waittime[IDNP_NEGO], &mt.mt_cookie);

	IDNXDC(domid, &mt, ticket, dset[0], dset[1], dset[2]);

	IDN_DUNLOCK(idn.localid);

	return (0);
}
1474 
1475 static int
1476 idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
1477     ushort_t dcookie)
1478 {
1479 	uint_t		msg = mtp->mt_mtype;
1480 	idn_msgtype_t	mt;
1481 	idn_domain_t	*dp = &idn_domain[domid];
1482 	idn_xdcargs_t	nargs;
1483 	procname_t	proc = "idn_recv_nego";
1484 
1485 	ASSERT(IDN_SYNC_IS_LOCKED());
1486 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1487 
1488 	mt.mt_cookie = mtp->mt_cookie;
1489 
1490 #ifdef DEBUG
1491 	if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
1492 		PR_HITLIST("%s:%d: dcpu=%d, dstate=%s, msg=%x, "
1493 		    "hitlist=%x\n",
1494 		    proc, domid, dp->dcpu, idnds_str[dp->dstate],
1495 		    msg, idn.domset.ds_hitlist);
1496 	}
1497 #endif /* DEBUG */
1498 
1499 	if (dp->dcpu == IDN_NIL_DCPU) {
1500 		int		cpuid;
1501 		uint_t		ticket;
1502 		/*
1503 		 * Brandnew link.  Need to open a new domain entry.
1504 		 */
1505 		ticket = GET_XARGS_NEGO_TICKET(xargs);
1506 		cpuid = dp->dcpu_last;
1507 		ASSERT(VALID_CPUID(cpuid));
1508 
1509 		if (idn_open_domain(domid, cpuid, ticket) != 0) {
1510 			PR_PROTO("%s:%d: FAILED to open doamin "
1511 			    "(ticket = 0x%x)\n",
1512 			    proc, domid, ticket);
1513 			return (-1);
1514 		}
1515 	}
1516 
1517 	if ((msg & IDNP_MSGTYPE_MASK) == IDNP_NEGO) {
1518 		PR_PROTO("%s:%d: assigned SEND cookie 0x%x\n",
1519 		    proc, domid, dcookie);
1520 		dp->dcookie_send = dcookie;
1521 	}
1522 
1523 	if ((dp->dxp == NULL) && IDNDS_IS_CLOSED(dp)) {
1524 		dp->dxp = &xphase_nego;
1525 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
1526 	} else if (dp->dxp != &xphase_nego) {
1527 		if (msg & IDNP_MSGTYPE_MASK) {
1528 			/*
1529 			 * If we already have a connection to somebody
1530 			 * trying to initiate a connection to us, then
1531 			 * possibly we've awaken from a coma or he did.
1532 			 * In any case, dismantle current connection
1533 			 * and attempt to establish a new one.
1534 			 */
1535 			if (dp->dstate == IDNDS_CONNECTED) {
1536 				DOMAINSET_ADD(idn.domset.ds_relink, domid);
1537 				IDN_HISTORY_LOG(IDNH_RELINK, domid,
1538 				    dp->dstate, idn.domset.ds_relink);
1539 				idn_disconnect(domid, IDNFIN_NORMAL,
1540 				    IDNFIN_ARG_NONE, IDNFIN_SYNC_YES);
1541 			} else {
1542 				mt.mt_mtype = IDNP_NACK;
1543 				mt.mt_atype = msg;
1544 
1545 				CLR_XARGS(nargs);
1546 
1547 				if (DOMAIN_IN_SET(idn.domset.ds_hitlist,
1548 				    domid)) {
1549 					SET_XARGS_NACK_TYPE(nargs,
1550 					    IDNNACK_EXIT);
1551 				} else {
1552 					int	new_masterid;
1553 					int	new_cpuid = IDN_NIL_DCPU;
1554 
1555 					SET_XARGS_NACK_TYPE(nargs,
1556 					    IDNNACK_RETRY);
1557 					IDN_GLOCK_SHARED();
1558 					new_masterid = IDN_GET_NEW_MASTERID();
1559 					if (new_masterid == IDN_NIL_DOMID)
1560 						new_masterid =
1561 						    IDN_GET_MASTERID();
1562 					if (new_masterid != IDN_NIL_DOMID) {
1563 						idn_domain_t	*mdp;
1564 
1565 						mdp = &idn_domain[new_masterid];
1566 						new_cpuid = mdp->dcpu;
1567 					}
1568 					SET_XARGS_NACK_ARG1(nargs,
1569 					    new_masterid);
1570 					SET_XARGS_NACK_ARG2(nargs, new_cpuid);
1571 					IDN_GUNLOCK();
1572 				}
1573 				idn_send_acknack(domid, &mt, nargs);
1574 			}
1575 		}
1576 		return (0);
1577 	}
1578 
1579 	idn_xphase_transition(domid, mtp, xargs);
1580 
1581 	return (0);
1582 }
1583 
/*
 * Retry handler for the NEGO phase.  Bails out if the domain has
 * moved out of the NEGO transition phase or its PEND xstate, defers
 * (resubmits itself) while a reconfig is in progress or while a
 * pending new master other than this domain has yet to be contacted,
 * and otherwise re-drives the NEGO phase state machine.
 */
/*ARGSUSED1*/
static void
idn_retry_nego(uint_t token, void *arg)
{
	int		domid = IDN_RETRY_TOKEN2DOMID(token);
	int		new_masterid;
	idn_domain_t	*dp = &idn_domain[domid];
	idn_xdcargs_t	xargs;
	procname_t	proc = "idn_retry_nego";

	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_NEGO);

	IDN_SYNC_LOCK();
	IDN_DLOCK_EXCL(domid);

	if (dp->dxp != &xphase_nego) {
		/* Domain has left the NEGO phase - nothing to retry. */
		STRING(str);

#ifdef DEBUG
		if (dp->dxp) {
			INUM2STR(dp->dxp->xt_msgtype, str);
		}
#endif /* DEBUG */

		PR_PROTO("%s:%d: dxp(%s) != NEGO...bailing...\n",
		    proc, domid, dp->dxp ? str : "NULL");
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}

	if (dp->dxstate != IDNXS_PEND) {
		PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
		    proc, domid, idnxs_str[dp->dxstate],
		    idnxs_str[IDNXS_PEND]);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}

	IDN_GLOCK_SHARED();
	if (idn.state == IDNGS_RECONFIG) {
		/*
		 * Have to try again later after
		 * reconfig has completed.
		 */
		PR_PROTO("%s:%d: reconfig in-progress...try later\n",
		    proc, domid);
		idn_retry_submit(idn_retry_nego, NULL, token,
		    idn_msg_retrytime[IDNP_NEGO]);
		IDN_GUNLOCK();
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}
	new_masterid = IDN_GET_NEW_MASTERID();
	if ((idn.state == IDNGS_CONNECT) &&
	    (new_masterid != IDN_NIL_DOMID) &&
	    (domid != new_masterid) &&
	    (idn.localid != new_masterid)) {
		/*
		 * We have a new master pending and this
		 * guy isn't it.  Wait until the local domain
		 * has a chance to connect with the new
		 * master before going forward with this
		 * guy.
		 */
		PR_PROTO("%s:%d: waiting for connect to new master %d\n",
		    proc, domid, IDN_GET_NEW_MASTERID());
		idn_retry_submit(idn_retry_nego, NULL, token,
		    idn_msg_retrytime[IDNP_NEGO]);
		IDN_GUNLOCK();
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}
	IDN_GUNLOCK();

	/*
	 * NOTE(review): xargs is passed uninitialized here; presumably
	 * the transition handlers ignore it when mtp == NULL (msg == 0)
	 * - confirm against idn_xphase_transition.
	 */
	idn_xphase_transition(domid, NULL, xargs);

	IDN_DUNLOCK(domid);
	IDN_SYNC_UNLOCK();
}
1667 
/*
 * Validate an incoming NEGO exchange for domain `domid' and drive the
 * global connect state forward.  Handles nack/hitlist rejection,
 * extracts the remote vote and DSET, selects a master via
 * idn_select_master(), initializes master/slave context on the local
 * domain's first connection, registers the CONNECT synchronization,
 * and opens/connects any additional domains learned from the DSET.
 *
 * Returns 0 on success, 1 to retry later, -1 to abandon the domain.
 */
static int
idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	int		d, new_masterid, masterid;
	int		cpuid, m_cpuid = -1;
	uint_t		dmask;
	uint_t		msg = mtp ? mtp->mt_mtype : 0;
	idn_domain_t	*dp, *ldp;
	domainset_t	con_set, pending_set;
	idnneg_dset_t	dset;
	procname_t	proc = "idn_check_nego";

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	dp = &idn_domain[domid];
	ldp = &idn_domain[idn.localid];

	if (msg & IDNP_NACK) {
		/* EXIT nack puts the sender on our hitlist for good. */
		if (GET_XARGS_NACK_TYPE(xargs) == IDNNACK_EXIT) {
			PR_HITLIST("%s:%d(%s): (msg=%x) EXIT received, "
			    "adding to hitlist %x -> %x\n",
			    proc, domid, idnds_str[dp->dstate], msg,
			    idn.domset.ds_hitlist,
			    idn.domset.ds_hitlist | DOMAINSET(domid));

			DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
			return (-1);
		} else {
			return (0);
		}
	}

	if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
		PR_HITLIST("%s:%d(%s): (msg=%x) domain in hitlist (%x) - "
		    "exiting phase\n",
		    proc, domid, idnds_str[dp->dstate], msg,
		    idn.domset.ds_hitlist);
		return (-1);
	}

	if ((dp->dstate == IDNDS_NEGO_PEND) && (msg & IDNP_MSGTYPE_MASK) &&
	    (msg & IDNP_ACK))		/* nego+ack */
		return (1);

	dmask = (uint_t)-1;

	IDN_GLOCK_EXCL();
	if (idn.state == IDNGS_DISCONNECT) {
		PR_PROTO("%s:%d: DISCONNECT in-progress >>> EXIT\n",
		    proc, domid);
		IDN_GUNLOCK();
		return (-1);
	} else if (idn.state == IDNGS_OFFLINE) {
		/* First link attempt - move global state to CONNECT. */
		IDN_GSTATE_TRANSITION(IDNGS_CONNECT);
		IDN_PREP_HWINIT();
		IDN_DLOCK_EXCL(idn.localid);
		ldp->dvote.v.connected = 0;
		IDN_DUNLOCK(idn.localid);
	}

	if (!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid)) {
		DOMAINSET_ADD(idn.domset.ds_trans_on, domid);
		IDN_HISTORY_LOG(IDNH_NEGO, domid,
		    idn.domset.ds_trans_on,
		    idn.domset.ds_connected);
	}

	switch (idn.state) {
	case IDNGS_RECONFIG:
		PR_PROTO("%s:%d: RECONFIG in-progress >>> RETRY\n",
		    proc, domid);
		IDN_GUNLOCK();
		return (1);

	case IDNGS_CONNECT:
		new_masterid = IDN_GET_NEW_MASTERID();
		if ((new_masterid != IDN_NIL_DOMID) &&
		    (domid != new_masterid) &&
		    (idn.localid != new_masterid)) {
			PR_PROTO("%s:%d: waiting for connect to "
			    "new master %d\n",
			    proc, domid, IDN_GET_NEW_MASTERID());
			IDN_GUNLOCK();
			return (1);
		}
		break;

	default:
		break;
	}

	ASSERT((idn.state == IDNGS_CONNECT) || (idn.state == IDNGS_ONLINE));

	con_set = 0;

	if (msg) {
		idn_domain_t	*mdp;
		idn_vote_t	vote;

		vote.ticket = GET_XARGS_NEGO_TICKET(xargs);
		/*
		 * Sender should not have set master bit,
		 * but just in case clear it so local domain
		 * doesn't get confused.
		 */
		vote.v.master = 0;
		dp->dvote.ticket = vote.ticket;
		GET_XARGS_NEGO_DSET(xargs, dset);
		/*LINTED*/
		IDNNEG_DSET_GET_MASK(dset, domid, dmask);
		IDNNEG_DSET_GET_MASTER(dset, new_masterid);
		if (new_masterid == IDNNEG_NO_MASTER) {
			new_masterid = IDN_NIL_DOMID;
		} else {
			/*
			 * Remote domain has a master.  Find
			 * his cpuid in the dset.  We may need
			 * it to initiate a connection.
			 */
			if (new_masterid == domid) {
				m_cpuid = dp->dcpu;
			} else {
				IDNNEG_DSET_GET(dset, new_masterid, m_cpuid,
				    dmask);
				if (m_cpuid == -1) {
					/*
					 * Something is bogus if remote domain
					 * is reporting a valid masterid, but
					 * doesn't have the cpuid for it.
					 */
					cmn_err(CE_WARN,
					    "IDN: 209: remote domain (ID "
					    "%d, CPU %d) reporting master "
					    "(ID %d) without CPU ID",
					    domid, dp->dcpu, new_masterid);
					DOMAINSET_ADD(idn.domset.ds_hitlist,
					    domid);
					IDN_GUNLOCK();
					return (-1);
				}
			}
		}

		/* Collect the set of other domains the sender knows of. */
		for (d = 0; d < MAX_DOMAINS; d++) {
			if ((d == idn.localid) || (d == domid))
				continue;
			IDNNEG_DSET_GET(dset, d, cpuid, dmask);
			if (cpuid != -1) {
				DOMAINSET_ADD(con_set, d);
			}
		}

#ifdef DEBUG
		if (idn.domset.ds_hitlist) {
			PR_HITLIST("%s:%d: con_set %x -> %x (hitlist = %x)\n",
			    proc, domid, con_set,
			    con_set & ~idn.domset.ds_hitlist,
			    idn.domset.ds_hitlist);
		}
#endif /* DEBUG */

		con_set &= ~idn.domset.ds_hitlist;

		ASSERT(!DOMAIN_IN_SET(con_set, idn.localid));
		ASSERT(!DOMAIN_IN_SET(con_set, domid));

		if ((new_masterid != IDN_NIL_DOMID) &&
		    DOMAIN_IN_SET(idn.domset.ds_hitlist, new_masterid)) {
			PR_HITLIST("%s:%d: new_mstr %d -> -1 (hitlist = %x)\n",
			    proc, domid, new_masterid,
			    idn.domset.ds_hitlist);
			IDN_GUNLOCK();
			return (1);
		}

		if (idn_select_master(domid, new_masterid, m_cpuid) < 0) {
			/*
			 * Returns w/GLOCK dropped if error.
			 */
			return (1);
		}

		masterid = IDN_GET_MASTERID();
		ASSERT(masterid != IDN_NIL_DOMID);

		if (idn.state == IDNGS_CONNECT) {
			/*
			 * This is the initial connection for
			 * the local domain.
			 */
			IDN_DLOCK_EXCL(idn.localid);

			if (masterid == idn.localid) {
				/* We won - set up master context locally. */
				if (idn_master_init() < 0) {
					cmn_err(CE_WARN,
					    "IDN: 210: failed to init "
					    "MASTER context");
					ldp->dvote.v.master = 0;
					IDN_DUNLOCK(idn.localid);
					IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
					IDN_SET_MASTERID(IDN_NIL_DOMID);
					IDN_GUNLOCK();
					return (-1);
				}
				DSLAB_LOCK_EXCL(idn.localid);
				ldp->dslab_state = DSLAB_STATE_LOCAL;
				DSLAB_UNLOCK(idn.localid);
				ldp->dvote.v.connected = 1;
			} else {
				/*
				 * Either the remote domain is the
				 * master or its a new slave trying
				 * to connect to us.  We can't allow
				 * further progress until we've
				 * sync'd up with the master.
				 */
				if (masterid != domid) {
					IDN_DUNLOCK(idn.localid);
					IDN_GUNLOCK();
					return (1);
				}
				DSLAB_LOCK_EXCL(idn.localid);
				ldp->dslab_state = DSLAB_STATE_REMOTE;
				DSLAB_UNLOCK(idn.localid);
			}
			IDN_DUNLOCK(idn.localid);
			/*
			 * We've sync'd up with the new master.
			 */
			IDN_GSTATE_TRANSITION(IDNGS_ONLINE);
		}

		mdp = &idn_domain[masterid];

		if ((masterid != domid) && !IDNDS_CONFIG_DONE(mdp)) {
			/*
			 * We can't progress any further with
			 * other domains until we've exchanged all
			 * the necessary CFG info with the master,
			 * i.e. until we have a mailbox area from
			 * which we can allocate mailboxes to
			 * other domains.
			 */
			PR_PROTO("%s:%d: still exchanging CFG "
			    "w/master(%d)\n", proc, domid, masterid);
			IDN_GUNLOCK();
			return (1);
		}

		DSLAB_LOCK_EXCL(domid);
		dp->dslab_state = ldp->dslab_state;
		DSLAB_UNLOCK(domid);
		if (idn.state != IDNGS_ONLINE) {
			IDN_GSTATE_TRANSITION(IDNGS_ONLINE);
		}
	}

	IDN_GUNLOCK();

	/* New domains learned from the DSET, not already in transition. */
	pending_set = con_set;
	pending_set &= ~(idn.domset.ds_trans_on | idn.domset.ds_connected);
	idn.domset.ds_trans_on |= pending_set;

	con_set |= idn.domset.ds_trans_on | idn.domset.ds_connected;
	con_set &= ~idn.domset.ds_trans_off;
	DOMAINSET_ADD(con_set, idn.localid);

	if (dp->dsync.s_cmd != IDNSYNC_CONNECT) {
		idn_sync_exit(domid, IDNSYNC_DISCONNECT);
		idn_sync_enter(domid, IDNSYNC_CONNECT,
		    con_set, DOMAINSET(idn.localid), idn_xstate_transfunc,
		    (void *)IDNP_CON);
	}

	/*
	 * Get this domain registered as an expected domain on
	 * the remaining domains in the CONNECT synchronization.
	 */
	(void) idn_sync_register(domid, IDNSYNC_CONNECT, 0, IDNSYNC_REG_NEW);

	/*
	 * Note that if (msg == 0), i.e. then there will be
	 * no dset and also pending_set will be 0.
	 * So, the following loop will never attempt to
	 * look at the dset unless (msg != 0), implying
	 * that we've been through the initial code above
	 * and have initialized dmask.
	 */
	ASSERT(pending_set ? (dmask != (uint_t)-1) : 1);

	for (d = 0; d < MAX_DOMAINS; d++) {
		int	rv;

		if (!DOMAIN_IN_SET(pending_set, d))
			continue;

		ASSERT((d != idn.localid) && (d != domid));

		dp = &idn_domain[d];

		IDNNEG_DSET_GET(dset, d, cpuid, dmask);
		if (cpuid == -1) {
			PR_PROTO("%s:%d: failed to get cpuid from dset "
			    "for domain %d (pset = 0x%x)\n",
			    proc, domid, d, pending_set);
			DOMAINSET_DEL(idn.domset.ds_trans_on, d);
			continue;
		}

		IDN_DLOCK_EXCL(d);
		if ((rv = idn_open_domain(d, cpuid, 0)) != 0) {
			PR_PROTO("%s:%d: failed "
			    "idn_open_domain(%d,%d,0) (rv = %d)\n",
			    proc, domid, d, cpuid, rv);
			if (rv < 0) {
				cmn_err(CE_WARN,
				    "IDN: 205: (%s) failed to "
				    "open-domain(%d,%d)",
				    proc, d, cpuid);
				DOMAINSET_DEL(idn.domset.ds_trans_on, d);
			} else if (DOMAIN_IN_SET(idn.domset.ds_trans_off, d)) {
				/*
				 * We've requested to connect to a domain
				 * from which we're disconnecting.  We
				 * better mark this guy for relinking.
				 */
				DOMAINSET_ADD(idn.domset.ds_relink, d);
				IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
				    idn.domset.ds_relink);
			}
			IDN_DUNLOCK(d);
			continue;
		}

		idn_connect(d);

		IDN_DUNLOCK(d);
	}

	return (0);
}
2010 
2011 /*ARGSUSED*/
2012 static void
2013 idn_action_nego_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2014 {
2015 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2016 	idn_msgtype_t	mt;
2017 	domainset_t	con_set;
2018 
2019 	ASSERT(IDN_SYNC_IS_LOCKED());
2020 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2021 
2022 	con_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
2023 	con_set &= ~idn.domset.ds_trans_off;
2024 
2025 	if (!msg) {
2026 		idn_send_nego(domid, NULL, con_set);
2027 	} else {
2028 		mt.mt_mtype = IDNP_NEGO | IDNP_ACK;
2029 		mt.mt_atype = 0;
2030 		mt.mt_cookie = mtp->mt_cookie;
2031 		idn_send_nego(domid, &mt, con_set);
2032 	}
2033 }
2034 
/*
 * NEGO-phase error action.  Processes any received nack (cleaning up
 * per the nack's guidance), nacks back any real message with a RETRY
 * hint naming our (new) master, then either schedules a NEGO retry
 * or - on an EXIT nack - abandons the link and disconnects.
 */
/*ARGSUSED*/
static void
idn_error_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	int	new_masterid, new_cpuid;
	int	retry = 1;
	uint_t	msg = mtp ? mtp->mt_mtype : 0;
	uint_t	token;

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	if (msg & IDNP_NACK) {
		idn_nack_t	nack;

		nack = GET_XARGS_NACK_TYPE(xargs);
		switch (nack) {
		case IDNNACK_RETRY:
			/* Sender supplied a master hint to use. */
			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
			new_cpuid    = (int)GET_XARGS_NACK_ARG2(xargs);
			break;

		case IDNNACK_EXIT:
			/* Sender wants us gone - don't retry. */
			retry = 0;
			/*FALLTHROUGH*/

		default:
			new_masterid = IDN_NIL_DOMID;
			new_cpuid    = IDN_NIL_DCPU;
			break;
		}
		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
	}

	if (msg & IDNP_MSGTYPE_MASK) {
		/* Real message (not just ack/nack) - nack it w/RETRY. */
		idn_msgtype_t	mt;
		idn_xdcargs_t	nargs;

		mt.mt_mtype = IDNP_NACK;
		mt.mt_atype = msg;
		mt.mt_cookie = mtp->mt_cookie;
		CLR_XARGS(nargs);
		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
		IDN_GLOCK_SHARED();
		new_masterid = IDN_GET_NEW_MASTERID();
		if (new_masterid == IDN_NIL_DOMID)
			new_masterid = IDN_GET_MASTERID();
		if (new_masterid != IDN_NIL_DOMID)
			new_cpuid = idn_domain[new_masterid].dcpu;
		else
			new_cpuid = IDN_NIL_DCPU;
		SET_XARGS_NACK_ARG1(nargs, new_masterid);
		SET_XARGS_NACK_ARG2(nargs, new_cpuid);
		IDN_GUNLOCK();
		idn_send_acknack(domid, &mt, nargs);
	}

	if (retry) {
		token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
		idn_retry_submit(idn_retry_nego, NULL, token,
		    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
	} else {
		/* Give up on this domain entirely. */
		DOMAINSET_DEL(idn.domset.ds_relink, domid);
		IDN_RESET_COOKIES(domid);
		idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
		    IDNDS_SYNC_TYPE(&idn_domain[domid]));
	}
}
2103 
/*
 * Action routine for the NEGO_SENT exchange state.  Depending on
 * what arrived (nothing, a nego, a nego+ack, or a nack) we either
 * re-send our nego, answer with a nego+ack carrying our domain/cpu
 * set and vote ticket, or process the nack and possibly retry.
 */
/*ARGSUSED*/
static void
idn_action_nego_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	uint_t		msg = mtp ? mtp->mt_mtype : 0;
	domainset_t	conset;
	idn_msgtype_t	mt;

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;

	/*
	 * Set of domains we're advertising: connected or connecting,
	 * minus those on their way down.
	 */
	conset = idn.domset.ds_trans_on | idn.domset.ds_connected;
	conset &= ~idn.domset.ds_trans_off;

	if ((msg & IDNP_ACKNACK_MASK) == 0) {
		/*
		 * nego
		 */
		mt.mt_mtype = IDNP_NEGO | IDNP_ACK;
		mt.mt_atype = 0;
		idn_send_nego(domid, &mt, conset);
	} else if (msg & IDNP_MSGTYPE_MASK) {
		int		d;
		idn_xdcargs_t	nargs;
		idnneg_dset_t	dset;
		uint_t		dmask;
		idn_vote_t	vote;

		mt.mt_mtype = IDNP_ACK;
		mt.mt_atype = msg;
		/*
		 * Don't advertise ourselves or the recipient in
		 * the dset - only third parties.
		 */
		DOMAINSET_DEL(conset, idn.localid);
		DOMAINSET_DEL(conset, domid);

		/*
		 * Pack up the (domid, cpuid) pairs of the domains
		 * we know about for the remote side.
		 */
		dmask = IDNNEG_DSET_MYMASK();
		IDNNEG_DSET_INIT(dset, dmask);
		for (d = 0; d < MAX_DOMAINS; d++) {
			int	cpuid;

			if (!DOMAIN_IN_SET(conset, d))
				continue;

			if ((cpuid = idn_domain[d].dcpu) == IDN_NIL_DCPU)
				continue;

			IDNNEG_DSET_SET(dset, d, cpuid, dmask);
		}
		IDNNEG_DSET_SET_MASTER(dset, domid, IDN_GET_MASTERID());
		ASSERT((IDN_GET_MASTERID() != IDN_NIL_DOMID) ?
		    (idn_domain[IDN_GET_MASTERID()].dcpu != IDN_NIL_DCPU) : 1);
		/*
		 * Send our vote ticket with the master bit clear.
		 */
		vote.ticket = idn_domain[idn.localid].dvote.ticket;
		vote.v.master = 0;
		CLR_XARGS(nargs);
		SET_XARGS_NEGO_TICKET(nargs, vote.ticket);
		SET_XARGS_NEGO_DSET(nargs, dset);
		/*
		 * nego+ack
		 */
		idn_send_acknack(domid, &mt, nargs);
	} else {
		uint_t		token;
		int		new_masterid, new_cpuid;
		int		retry = 1;
		idn_nack_t	nack;
		/*
		 * nack - retry
		 *
		 * It's possible if we've made it this far that
		 * we may have already chosen a master and this
		 * dude might be it!  If it is we need to clean up.
		 */
		nack = GET_XARGS_NACK_TYPE(xargs);
		switch (nack) {
		case IDNNACK_RETRY:
			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
			new_cpuid = (int)GET_XARGS_NACK_ARG2(xargs);
			break;

		case IDNNACK_EXIT:
			retry = 0;
			/*FALLTHROUGH*/

		default:
			new_masterid = IDN_NIL_DOMID;
			new_cpuid = IDN_NIL_DCPU;
			break;
		}

		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);

		if (retry) {
			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
			idn_retry_submit(idn_retry_nego, NULL, token,
			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
		} else {
			/*
			 * Told to EXIT - give up on this domain and
			 * start disconnecting.
			 */
			DOMAINSET_DEL(idn.domset.ds_relink, domid);
			IDN_RESET_COOKIES(domid);
			idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
			    IDNDS_SYNC_TYPE(&idn_domain[domid]));
		}
	}
}
2207 
2208 /*ARGSUSED*/
2209 static void
2210 idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2211 {
2212 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2213 
2214 	ASSERT(IDN_SYNC_IS_LOCKED());
2215 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2216 
2217 	if (msg & IDNP_NACK) {
2218 		uint_t		token;
2219 		int		new_masterid, new_cpuid;
2220 		int		retry = 1;
2221 		idn_nack_t	nack;
2222 		/*
2223 		 * nack - retry.
2224 		 *
2225 		 * At this stage of receiving a nack we need to
2226 		 * check whether we need to start over again with
2227 		 * selecting a new master.
2228 		 */
2229 		nack = GET_XARGS_NACK_TYPE(xargs);
2230 		switch (nack) {
2231 		case IDNNACK_RETRY:
2232 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2233 			new_cpuid = (int)GET_XARGS_NACK_ARG2(xargs);
2234 			break;
2235 
2236 		case IDNNACK_EXIT:
2237 			retry = 0;
2238 			/*FALLTHROUGH*/
2239 
2240 		default:
2241 			new_masterid = IDN_NIL_DOMID;
2242 			new_cpuid = IDN_NIL_DCPU;
2243 			break;
2244 		}
2245 
2246 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2247 
2248 		if (retry) {
2249 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2250 			idn_retry_submit(idn_retry_nego, NULL, token,
2251 			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2252 		} else {
2253 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
2254 			IDN_RESET_COOKIES(domid);
2255 			idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
2256 			    IDNDS_SYNC_TYPE(&idn_domain[domid]));
2257 		}
2258 	}
2259 }
2260 
2261 static void
2262 idn_final_nego(int domid)
2263 {
2264 	idn_domain_t	*dp = &idn_domain[domid];
2265 
2266 	ASSERT(IDN_SYNC_IS_LOCKED());
2267 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2268 
2269 	(void) idn_retry_terminate(IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO));
2270 
2271 	ASSERT(dp->dstate == IDNDS_CONFIG);
2272 
2273 	dp->dxp = NULL;
2274 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
2275 
2276 	idn_send_config(domid, 1);
2277 }
2278 
2279 /*
2280  */
2281 /*ARGSUSED1*/
2282 static void
2283 idn_exit_nego(int domid, uint_t msgtype)
2284 {
2285 	idn_domain_t	*dp;
2286 	idn_fin_t	fintype;
2287 
2288 	ASSERT(IDN_SYNC_IS_LOCKED());
2289 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2290 
2291 	dp = &idn_domain[domid];
2292 
2293 	fintype = msgtype ? IDNFIN_NORMAL : IDNFIN_FORCE_HARD;
2294 
2295 	(void) idn_retry_terminate(IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO));
2296 
2297 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_connected, domid));
2298 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_ready_on, domid));
2299 	ASSERT(dp->dxp == &xphase_nego);
2300 
2301 	idn_nego_cleanup_check(domid, IDN_NIL_DOMID, IDN_NIL_DCPU);
2302 
2303 	IDN_GLOCK_SHARED();
2304 	if ((idn.state != IDNGS_DISCONNECT) &&
2305 	    !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
2306 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
2307 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
2308 		    idn.domset.ds_relink);
2309 	} else {
2310 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), NULL);
2311 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2312 	}
2313 	IDN_GUNLOCK();
2314 	/*
2315 	 * Reset send cookie to 0 so that receiver does not validate
2316 	 * cookie.  This is necessary since at this early stage it's
2317 	 * possible we may not have exchanged appropriate cookies.
2318 	 */
2319 	IDN_RESET_COOKIES(domid);
2320 	idn_disconnect(domid, fintype, IDNFIN_ARG_NONE, IDNDS_SYNC_TYPE(dp));
2321 }
2322 
/*
 * Called after a failed/nack'd NEGO with 'domid' to determine
 * whether master selection needs to be restarted.  If the remote
 * domain suggested an alternate master it's passed in via
 * new_masterid/new_cpuid (IDN_NIL_DOMID/IDN_NIL_DCPU if none).
 * May tear down our local master state, redirect the link attempt
 * at the suggested master, or kick off a reconfig.
 * Expects SYNC lock and the domain's DLOCK (excl); takes/drops
 * GLOCK and the local domain's DLOCK internally.
 */
static void
idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
{
	idn_domain_t	*ldp, *dp;
	procname_t	proc = "idn_nego_cleanup_check";

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	dp = &idn_domain[domid];
	ldp = &idn_domain[idn.localid];

	IDN_GLOCK_EXCL();

	/*
	 * Only relevant while we're still forming connections:
	 * either ONLINE with nobody actually connected yet, or
	 * still in the CONNECT state.
	 */
	if (((idn.state == IDNGS_ONLINE) && !idn.domset.ds_connected) ||
	    (idn.state == IDNGS_CONNECT)) {
		domainset_t	trans_on;
		int		masterid;
		int		retry_domid = IDN_NIL_DOMID;
		int		rv;

		IDN_DLOCK_EXCL(idn.localid);
		masterid = (idn.state == IDNGS_ONLINE) ?
		    IDN_GET_MASTERID() : IDN_GET_NEW_MASTERID();
		trans_on = idn.domset.ds_trans_on;
		DOMAINSET_DEL(trans_on, domid);
		if (trans_on == 0) {
			int		d;
			domainset_t	relink = idn.domset.ds_relink;
			/*
			 * This was the only guy we were trying
			 * to connect with.
			 */
			ASSERT((idn.state == IDNGS_ONLINE) ?
			    ((idn.localid == masterid) ||
			    (domid == masterid)) : 1);
			if (idn.localid == masterid)
				idn_master_deinit();
			/*
			 * Drop all master/connected state and fall
			 * back to the CONNECT global state, recording
			 * the suggested new master (if any).
			 */
			ldp->dvote.v.connected = 0;
			ldp->dvote.v.master = 0;
			dp->dvote.v.master = 0;
			IDN_SET_MASTERID(IDN_NIL_DOMID);
			IDN_SET_NEW_MASTERID(new_masterid);
			IDN_GSTATE_TRANSITION(IDNGS_CONNECT);
			IDN_PREP_HWINIT();
			IDN_DUNLOCK(idn.localid);
			IDN_GUNLOCK();
			/*
			 * If there's a new master available then
			 * just try and relink with him unless
			 * it's ourself.
			 */
			if ((new_masterid != IDN_NIL_DOMID) &&
			    (new_masterid != idn.localid) &&
			    (new_masterid != domid)) {
				IDN_DLOCK_EXCL(new_masterid);
				rv = idn_open_domain(new_masterid,
				    new_cpuid, 0);
				if (rv < 0) {
					cmn_err(CE_WARN,
					    "IDN: 205: (%s) failed to "
					    "open-domain(%d,%d)",
					    proc, new_masterid, new_cpuid);
					IDN_GLOCK_EXCL();
					IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
					IDN_GUNLOCK();
				} else {
					relink = DOMAINSET(new_masterid);
				}
				IDN_DUNLOCK(new_masterid);
			}
			/*
			 * Pick the first domain (other than the one
			 * that nack'd us) still marked for relink as
			 * the next NEGO target.
			 */
			DOMAINSET_DEL(relink, domid);
			if (relink)
				for (d = 0; d < MAX_DOMAINS; d++) {
					if (!DOMAIN_IN_SET(relink, d))
						continue;
					retry_domid = d;
					break;
				}
		} else if (domid == masterid) {
			/*
			 * There are other domains we were trying
			 * to connect to.  As long as the chosen
			 * master was somebody other then this
			 * domain that nack'd us, life is cool, but
			 * if it was this remote domain we'll need
			 * to start over.
			 */
			IDN_DUNLOCK(idn.localid);
			dp->dvote.v.master = 0;
			IDN_SET_MASTERID(IDN_NIL_DOMID);
			IDN_SET_NEW_MASTERID(new_masterid);

			if (idn.state == IDNGS_ONLINE) {
				/*
				 * Already online under the failed
				 * master - must reconfig, unlinking
				 * (with relink) everybody still in
				 * transition.
				 */
				IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
				    gk_reconfig_last);
				IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
				IDN_GUNLOCK();
				idn_unlink_domainset(trans_on, IDNFIN_NORMAL,
				    IDNFIN_ARG_NONE,
				    IDNFIN_OPT_RELINK,
				    BOARDSET_ALL);
			} else if ((new_masterid != IDN_NIL_DOMID) &&
			    (new_masterid != idn.localid) &&
			    (new_masterid != domid) &&
			    !DOMAIN_IN_SET(trans_on, new_masterid)) {
				/*
				 * Suggested master is a usable third
				 * party we're not already linking
				 * with - open it and retry there.
				 */
				IDN_GUNLOCK();
				IDN_DLOCK_EXCL(new_masterid);
				rv = idn_open_domain(new_masterid,
				    new_cpuid, 0);
				IDN_GLOCK_EXCL();
				IDN_DUNLOCK(new_masterid);
				if (rv < 0) {
					cmn_err(CE_WARN,
					    "IDN: 205: (%s) failed to "
					    "open-domain(%d,%d)",
					    proc, new_masterid,
					    new_cpuid);
					IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
					new_masterid = IDN_NIL_DOMID;
				} else {
					retry_domid = new_masterid;
				}
				IDN_GUNLOCK();
			} else {
				IDN_GUNLOCK();
			}
		} else {
			/*
			 * Master untouched by this nack - nothing
			 * to clean up.
			 */
			IDN_DUNLOCK(idn.localid);
			IDN_GUNLOCK();
		}
		/*
		 * If we picked a new NEGO target, prime it for the
		 * NEGO exchange phase and schedule the retry.
		 */
		if (retry_domid != IDN_NIL_DOMID) {
			uint_t		token;
			idn_domain_t	*rdp = &idn_domain[retry_domid];

			IDN_DLOCK_EXCL(retry_domid);
			rdp->dxp = &xphase_nego;
			IDN_XSTATE_TRANSITION(rdp, IDNXS_PEND);
			IDN_DUNLOCK(retry_domid);
			token = IDN_RETRY_TOKEN(retry_domid, IDNRETRY_NEGO);
			idn_retry_submit(idn_retry_nego, NULL, token,
			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
		}
	} else {
		IDN_GUNLOCK();
	}
}
2470 
2471 static int
2472 idn_send_con(int domid, idn_msgtype_t *mtp, idn_con_t contype, domainset_t
2473     conset)
2474 {
2475 	idn_msgtype_t	mt;
2476 	uint_t		acknack;
2477 	procname_t	proc = "idn_send_con";
2478 
2479 	ASSERT(IDN_SYNC_IS_LOCKED());
2480 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2481 
2482 	if (mtp) {
2483 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
2484 		mt.mt_mtype = mtp->mt_mtype;
2485 		mt.mt_atype = mtp->mt_atype;
2486 		mt.mt_cookie = mtp->mt_cookie;
2487 	} else {
2488 		acknack = 0;
2489 		mt.mt_mtype = IDNP_CON;
2490 		mt.mt_atype = 0;
2491 		/*
2492 		 * For simple CON queries we want a unique
2493 		 * timer assigned.  For others, they
2494 		 * effectively share one.
2495 		 */
2496 		if (contype == IDNCON_QUERY)
2497 			mt.mt_cookie = 0;
2498 		else
2499 			mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
2500 	}
2501 
2502 	ASSERT((contype == IDNCON_QUERY) ? idn_domain[domid].dcookie_send : 1);
2503 
2504 	PR_PROTO("%s:%d: sending con%sto (cpu %d) [ct=%s, cs=0x%x]\n",
2505 	    proc, domid,
2506 	    (acknack & IDNP_ACK) ? "+ack " :
2507 	    (acknack & IDNP_NACK) ? "+nack " : " ",
2508 	    idn_domain[domid].dcpu,
2509 	    idncon_str[contype], conset);
2510 
2511 	IDN_MSGTIMER_START(domid, IDNP_CON, (ushort_t)contype,
2512 	    idn_msg_waittime[IDNP_CON], &mt.mt_cookie);
2513 
2514 	IDNXDC(domid, &mt, (uint_t)contype, (uint_t)conset, 0, 0);
2515 
2516 	return (0);
2517 }
2518 
2519 /*
2520  * Must leave w/DLOCK dropped and SYNC_LOCK held.
2521  */
2522 static int
2523 idn_recv_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2524 {
2525 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2526 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
2527 	idn_con_t	contype;
2528 	domainset_t	my_ready_set, ready_set;
2529 	idn_msgtype_t	mt;
2530 	idn_domain_t	*dp = &idn_domain[domid];
2531 	idn_xdcargs_t	aargs;
2532 	procname_t	proc = "idn_recv_con";
2533 
2534 	ASSERT(IDN_SYNC_IS_LOCKED());
2535 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2536 
2537 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2538 
2539 	contype   = GET_XARGS_CON_TYPE(xargs);
2540 	ready_set = GET_XARGS_CON_DOMSET(xargs);
2541 
2542 	CLR_XARGS(aargs);
2543 
2544 	if (!(msg & IDNP_NACK) && (contype == IDNCON_QUERY)) {
2545 		domainset_t	query_set;
2546 
2547 		query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
2548 		    ready_set, IDNSYNC_REG_REG);
2549 
2550 		my_ready_set = idn.domset.ds_connected | idn.domset.ds_ready_on;
2551 		my_ready_set &= ~idn.domset.ds_trans_off;
2552 		DOMAINSET_ADD(my_ready_set, idn.localid);
2553 
2554 		if (msg & IDNP_MSGTYPE_MASK) {
2555 			mt.mt_mtype = IDNP_ACK;
2556 			mt.mt_atype = IDNP_CON;
2557 			SET_XARGS_CON_TYPE(aargs, contype);
2558 			SET_XARGS_CON_DOMSET(aargs, my_ready_set);
2559 			idn_send_acknack(domid, &mt, aargs);
2560 		}
2561 
2562 		if (query_set) {
2563 			uint_t	token;
2564 
2565 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
2566 			idn_retry_submit(idn_retry_query, NULL, token,
2567 			    idn_msg_retrytime[(int)IDNRETRY_CONQ]);
2568 		}
2569 
2570 		return (0);
2571 	}
2572 
2573 	if (dp->dxp == NULL) {
2574 		STRING(mstr);
2575 		STRING(lstr);
2576 		/*
2577 		 * Must have received an inappropriate error
2578 		 * message as we should already be registered
2579 		 * by the time we reach here.
2580 		 */
2581 		INUM2STR(msg, mstr);
2582 		INUM2STR(msgarg, lstr);
2583 
2584 		PR_PROTO("%s:%d: ERROR: NOT YET REGISTERED (%s/%s)\n",
2585 		    proc, domid, mstr, lstr);
2586 
2587 		if (msg & IDNP_MSGTYPE_MASK) {
2588 			mt.mt_mtype = IDNP_NACK;
2589 			mt.mt_atype = msg;
2590 			SET_XARGS_NACK_TYPE(aargs, IDNNACK_RETRY);
2591 			idn_send_acknack(domid, &mt, aargs);
2592 		}
2593 
2594 		return (-1);
2595 	}
2596 
2597 	idn_xphase_transition(domid, mtp, xargs);
2598 
2599 	return (0);
2600 }
2601 
/*
 * Retry-job callback for the CON sequence with the domain encoded
 * in 'token'.  Re-drives the CON exchange phase, but only if the
 * domain is still in the CON phase awaiting connect (otherwise the
 * retry is stale and is quietly dropped).
 */
/*ARGSUSED1*/
static void
idn_retry_con(uint_t token, void *arg)
{
	int		domid = IDN_RETRY_TOKEN2DOMID(token);
	idn_domain_t	*dp = &idn_domain[domid];
	/*
	 * Note: xargs is deliberately left uninitialized; the
	 * internally-driven (mtp == NULL) transition path does not
	 * consume it.
	 */
	idn_xdcargs_t	xargs;
	procname_t	proc = "idn_retry_con";

	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_CON);

	IDN_SYNC_LOCK();
	IDN_DLOCK_EXCL(domid);

	if (dp->dxp != &xphase_con) {
		STRING(str);

#ifdef DEBUG
		if (dp->dxp) {
			INUM2STR(dp->dxp->xt_msgtype, str);
		}
#endif /* DEBUG */

		PR_PROTO("%s:%d: dxp(%s) != CON...bailing...\n",
		    proc, domid, dp->dxp ? str : "NULL");
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}

	if ((dp->dsync.s_cmd != IDNSYNC_CONNECT) ||
	    (dp->dxstate != IDNXS_PEND)) {
		/*
		 * Domain moved on since this retry was queued -
		 * nothing to re-drive.
		 */
		PR_PROTO("%s:%d: cmd (%s) and/or xstate (%s) not "
		    "expected (%s/%s)\n",
		    proc, domid, idnsync_str[dp->dsync.s_cmd],
		    idnxs_str[dp->dxstate], idnsync_str[IDNSYNC_CONNECT],
		    idnxs_str[IDNXS_PEND]);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		return;
	}

	idn_xphase_transition(domid, NULL, xargs);

	IDN_DUNLOCK(domid);
	IDN_SYNC_UNLOCK();
}
2649 
/*
 * Check routine for the CON exchange phase with 'domid'.
 * Registers the sender's ready-set, queries any other domains that
 * still need to sync up on the connect, and promotes the domain to
 * connected once everybody we expect has reported ready.
 * Returns 0 to proceed, 1 to hold in the current state.
 */
static int
idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	int		ready;
	uint_t		msg = mtp ? mtp->mt_mtype : 0;
	idn_domain_t	*dp = &idn_domain[domid];
	domainset_t	ready_set, my_ready_set, query_set;

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	if (msg & IDNP_NACK)
		return (0);

	/* con+ack received while still pending - move forward */
	if ((dp->dstate == IDNDS_CON_PEND) &&
	    (msg & IDNP_MSGTYPE_MASK) && (msg & IDNP_ACK))	/* con+ack */
		return (1);

	if (msg == 0) {
		/*
		 * Internally driven - use our own view of who's
		 * ready.
		 */
		ready_set = idn.domset.ds_connected &
		    ~idn.domset.ds_trans_off;
	} else {
		/*
		 * Take the sender's advertised ready-set and mark
		 * him as ready to come online.
		 */
		ready_set = GET_XARGS_CON_DOMSET(xargs);
		DOMAINSET_ADD(idn.domset.ds_ready_on, domid);
	}

	DOMAINSET_ADD(ready_set, idn.localid);

	query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
	    ready_set, IDNSYNC_REG_REG);
	/*
	 * No need to query this domain as he's already
	 * in the CON sequence.
	 */
	DOMAINSET_DEL(query_set, domid);

	/*
	 * Once everybody we expect has checked in, this domain
	 * graduates from ready_on to connected.
	 */
	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
	if (ready) {
		DOMAINSET_DEL(idn.domset.ds_ready_on, domid);
		DOMAINSET_ADD(idn.domset.ds_connected, domid);
	}

	if (query_set) {
		int	d;

		my_ready_set = idn.domset.ds_ready_on |
		    idn.domset.ds_connected;
		my_ready_set &= ~idn.domset.ds_trans_off;
		DOMAINSET_ADD(my_ready_set, idn.localid);

		/*
		 * Send CON queries to the laggards so they can
		 * sync up, skipping any already in the CONNECT
		 * sync or without exchanged cookies.
		 */
		for (d = 0; d < MAX_DOMAINS; d++) {
			if (!DOMAIN_IN_SET(query_set, d))
				continue;

			dp = &idn_domain[d];

			IDN_DLOCK_EXCL(d);
			if ((dp->dsync.s_cmd == IDNSYNC_CONNECT) ||
			    !dp->dcookie_send) {
				IDN_DUNLOCK(d);
				continue;
			}

			IDN_SYNC_QUERY_UPDATE(domid, d);

			idn_send_con(d, NULL, IDNCON_QUERY, my_ready_set);
			IDN_DUNLOCK(d);
		}
	}

	/*
	 * Internal events always proceed; received messages hold
	 * (return 1) until the sync set is complete.
	 */
	return (!msg ? 0 : (ready ? 0 : 1));
}
2722 
2723 /*ARGSUSED2*/
2724 static void
2725 idn_error_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2726 {
2727 	uint_t	token;
2728 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2729 
2730 	ASSERT(IDN_SYNC_IS_LOCKED());
2731 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2732 
2733 	if (msg & IDNP_MSGTYPE_MASK) {
2734 		idn_msgtype_t	mt;
2735 		idn_xdcargs_t	nargs;
2736 
2737 		mt.mt_mtype = IDNP_NACK;
2738 		mt.mt_atype = msg;
2739 		mt.mt_cookie = mtp->mt_cookie;
2740 		CLR_XARGS(nargs);
2741 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
2742 		idn_send_acknack(domid, &mt, nargs);
2743 	}
2744 
2745 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2746 	idn_retry_submit(idn_retry_con, NULL, token,
2747 	    idn_msg_retrytime[(int)IDNRETRY_CON]);
2748 }
2749 
2750 /*ARGSUSED*/
2751 static void
2752 idn_action_con_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2753 {
2754 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2755 	idn_domain_t	*dp = &idn_domain[domid];
2756 	idn_msgtype_t	mt;
2757 	domainset_t	my_ready_set;
2758 
2759 	ASSERT(IDN_SYNC_IS_LOCKED());
2760 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2761 
2762 	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_on |
2763 	    idn.domset.ds_connected;
2764 	my_ready_set &= ~idn.domset.ds_trans_off;
2765 	DOMAINSET_ADD(my_ready_set, idn.localid);
2766 
2767 	if (!msg) {
2768 		idn_send_con(domid, NULL, IDNCON_NORMAL, my_ready_set);
2769 	} else {
2770 		mt.mt_mtype = IDNP_CON | IDNP_ACK;
2771 		mt.mt_atype = 0;
2772 		mt.mt_cookie = mtp->mt_cookie;
2773 		idn_send_con(domid, &mt, IDNCON_NORMAL, my_ready_set);
2774 	}
2775 }
2776 
2777 static void
2778 idn_action_con_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2779 {
2780 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2781 	idn_domain_t	*dp = &idn_domain[domid];
2782 	idn_con_t	contype;
2783 	domainset_t	my_ready_set;
2784 	idn_msgtype_t	mt;
2785 
2786 	ASSERT(IDN_SYNC_IS_LOCKED());
2787 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2788 
2789 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2790 
2791 	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_on |
2792 	    idn.domset.ds_connected;
2793 	my_ready_set &= ~idn.domset.ds_trans_off;
2794 	DOMAINSET_ADD(my_ready_set, idn.localid);
2795 
2796 	contype = GET_XARGS_CON_TYPE(xargs);
2797 
2798 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
2799 		/*
2800 		 * con
2801 		 */
2802 		mt.mt_mtype = IDNP_CON | IDNP_ACK;
2803 		mt.mt_atype = 0;
2804 		idn_send_con(domid, &mt, contype, my_ready_set);
2805 	} else if (msg & IDNP_MSGTYPE_MASK) {
2806 		idn_xdcargs_t	cargs;
2807 
2808 		mt.mt_mtype = IDNP_ACK;
2809 		mt.mt_atype = msg;
2810 		CLR_XARGS(cargs);
2811 		SET_XARGS_CON_TYPE(cargs, contype);
2812 		SET_XARGS_CON_DOMSET(cargs, my_ready_set);
2813 		/*
2814 		 * con+ack
2815 		 */
2816 		idn_send_acknack(domid, &mt, cargs);
2817 	} else {
2818 		uint_t	token;
2819 		/*
2820 		 * nack - retry
2821 		 */
2822 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2823 		idn_retry_submit(idn_retry_con, NULL, token,
2824 		    idn_msg_retrytime[(int)IDNRETRY_CON]);
2825 	}
2826 }
2827 
2828 /*ARGSUSED*/
2829 static void
2830 idn_action_con_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2831 {
2832 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2833 
2834 	ASSERT(IDN_SYNC_IS_LOCKED());
2835 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2836 
2837 	if (msg & IDNP_NACK) {
2838 		uint_t	token;
2839 		/*
2840 		 * nack - retry
2841 		 */
2842 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2843 		idn_retry_submit(idn_retry_con, NULL, token,
2844 		    idn_msg_retrytime[(int)IDNRETRY_CON]);
2845 	}
2846 }
2847 
/*
 * Final routine of the CON exchange phase - the connection to
 * 'domid' is complete.  Closes out the sync sequence, verifies the
 * SMR mailbox (disconnecting on corruption), activates the mailbox
 * and channels, and schedules the post-link work in the background.
 */
static void
idn_final_con(int domid)
{
	uint_t		targ;
	uint_t		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
	idn_domain_t	*dp = &idn_domain[domid];
	procname_t	proc = "idn_final_con";

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	(void) idn_retry_terminate(token);

	/*
	 * Done with the exchange state machine for this domain.
	 */
	dp->dxp = NULL;
	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);

	idn_sync_exit(domid, IDNSYNC_CONNECT);

	CHECKPOINT_OPENED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 1);

	DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
	DOMAINSET_DEL(idn.domset.ds_relink, domid);
	IDN_FSTATE_TRANSITION(dp, IDNFIN_OFF);

	PR_PROTO("%s:%d: CONNECTED\n", proc, domid);

	if (idn.domset.ds_trans_on == 0) {
		/*
		 * Nothing else pending - if no disconnects or
		 * relinks are outstanding either, the hitlist can
		 * be cleared.
		 */
		if ((idn.domset.ds_trans_off | idn.domset.ds_relink) == 0) {
			PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
			    proc, domid, idn.domset.ds_hitlist);
			idn.domset.ds_hitlist = 0;
		}
		PR_PROTO("%s:%d: ALL CONNECTED ************ "
		    "(0x%x + 0x%x) = 0x%x\n", proc, domid,
		    DOMAINSET(idn.localid), idn.domset.ds_connected,
		    DOMAINSET(idn.localid) | idn.domset.ds_connected);
	} else {
		PR_PROTO("%s:%d: >>> ds_trans_on = 0x%x, ds_ready_on = 0x%x\n",
		    proc, domid,
		    idn.domset.ds_trans_on, idn.domset.ds_ready_on);
	}

	if (idn_verify_config_mbox(domid)) {
		idnsb_error_t	idnerr;
		/*
		 * Mailbox is not cool. Need to disconnect.
		 */
		INIT_IDNKERR(&idnerr);
		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_SMR_CORRUPTED);
		SET_IDNKERR_PARAM0(&idnerr, domid);
		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
		/*
		 * We cannot disconnect from an individual domain
		 * unless all domains are attempting to disconnect
		 * from him also, especially now since we touched
		 * the SMR and now we have a potential cache conflicts
		 * with the other domains with respect to this
		 * domain.  Disconnect attempt will effectively
		 * shutdown connection with respective domain
		 * which is the effect we really want anyway.
		 */
		idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_SMRBAD,
		    IDNFIN_SYNC_YES);

		return;
	}

	if (lock_try(&idn.first_swlink)) {
		/*
		 * This is our first connection.  Need to
		 * kick some stuff into gear.
		 */
		idndl_dlpi_init();
		(void) idn_activate_channel(CHANSET_ALL, IDNCHAN_ONLINE);

		targ = 0xf0;
	} else {
		targ = 0;
	}

	idn_mainmbox_activate(domid);

	idn_update_op(IDNOP_CONNECTED, DOMAINSET(domid), NULL);

	IDN_GKSTAT_GLOBAL_EVENT(gk_links, gk_link_last);

	/*
	 * Make sure all prior stores are visible before the state
	 * flips to CONNECTED.
	 */
	membar_stst_ldst();

	IDN_DSTATE_TRANSITION(dp, IDNDS_CONNECTED);
	/*
	 * Need to kick off initial commands in background.
	 * We do not want to do them within the context of
	 * a protocol server because they may sleep and thus
	 * cause the protocol server to incur a soft-deadlock,
	 * i.e. he's sleeping waiting in the slab-waiting area
	 * for a response that will arrive on his protojob
	 * queue, but which he obviously can't process since
	 * he's not waiting on his protojob queue.
	 */
	targ |= domid & 0x0f;
	(void) timeout(idn_link_established, (void *)(uintptr_t)targ, 50);

	cmn_err(CE_NOTE,
	    "!IDN: 200: link (domain %d, CPU %d) connected",
	    dp->domid, dp->dcpu);
}
2955 
/*
 * Exit handler for the CON exchange phase - the connect attempt
 * with 'domid' is being aborted.  msgtype == 0 indicates an
 * internal failure (e.g. timeout) and forces a hard finish.
 * Unless we're globally disconnecting, mark the domain for relink
 * so we'll try again, then begin the disconnect.
 */
static void
idn_exit_con(int domid, uint_t msgtype)
{
	idn_domain_t	*dp = &idn_domain[domid];
	idn_fin_t	fintype;
	procname_t	proc = "idn_exit_con";
	STRING(str);

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	INUM2STR(msgtype, str);
	PR_PROTO("%s:%d: msgtype = 0x%x(%s)\n", proc, domid, msgtype, str);

	fintype = msgtype ? IDNFIN_NORMAL : IDNFIN_FORCE_HARD;

	IDN_GLOCK_SHARED();
	if (idn.state != IDNGS_DISCONNECT) {
		DOMAINSET_ADD(idn.domset.ds_relink, domid);
		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
		    idn.domset.ds_relink);
	} else {
		DOMAINSET_DEL(idn.domset.ds_relink, domid);
	}
	IDN_GUNLOCK();

	idn_disconnect(domid, fintype, IDNFIN_ARG_NONE, IDNDS_SYNC_TYPE(dp));
}
2984 
2985 static int
2986 idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype, idn_finarg_t
2987     finarg, idn_finopt_t finopt, domainset_t finset, uint_t finmaster)
2988 {
2989 	int		need_timer = 1;
2990 	uint_t		acknack;
2991 	uint_t		fintypearg = 0;
2992 	idn_msgtype_t	mt;
2993 	idn_domain_t	*dp = &idn_domain[domid];
2994 	procname_t	proc = "idn_send_fin";
2995 
2996 	ASSERT(IDN_SYNC_IS_LOCKED());
2997 	ASSERT(IDN_DLOCK_IS_HELD(domid));
2998 
2999 	ASSERT((fintype != IDNFIN_QUERY) ? (finopt != IDNFIN_OPT_NONE) : 1);
3000 
3001 	if (mtp) {
3002 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
3003 		mt.mt_mtype = mtp->mt_mtype;
3004 		mt.mt_atype = mtp->mt_atype;
3005 		mt.mt_cookie = mtp->mt_cookie;
3006 	} else {
3007 		acknack = 0;
3008 		mt.mt_mtype = IDNP_FIN;
3009 		mt.mt_atype = 0;
3010 		/*
3011 		 * For simple FIN queries we want a unique
3012 		 * timer assigned.  For others, they
3013 		 * effectively share one.
3014 		 */
3015 		if (fintype == IDNFIN_QUERY)
3016 			mt.mt_cookie = 0;
3017 		else
3018 			mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
3019 	}
3020 
3021 	PR_PROTO("%s:%d: sending fin%sto (cpu %d) "
3022 	    "[ft=%s, fa=%s, fs=0x%x, fo=%s, fm=(%d,%d)]\n",
3023 	    proc, domid,
3024 	    (acknack & IDNP_ACK) ? "+ack " :
3025 	    (acknack & IDNP_NACK) ? "+nack " : " ",
3026 	    dp->dcpu, idnfin_str[fintype], idnfinarg_str[finarg],
3027 	    (int)finset, idnfinopt_str[finopt],
3028 	    FIN_MASTER_DOMID(finmaster), FIN_MASTER_CPUID(finmaster));
3029 
3030 	if (need_timer) {
3031 		IDN_MSGTIMER_START(domid, IDNP_FIN, (ushort_t)fintype,
3032 		    idn_msg_waittime[IDNP_FIN], &mt.mt_cookie);
3033 	}
3034 
3035 	SET_FIN_TYPE(fintypearg, fintype);
3036 	SET_FIN_ARG(fintypearg, finarg);
3037 
3038 	IDNXDC(domid, &mt, fintypearg, (uint_t)finset, (uint_t)finopt,
3039 	    finmaster);
3040 
3041 	return (0);
3042 }
3043 
3044 /*
3045  * Must leave w/DLOCK dropped and SYNC_LOCK held.
3046  */
3047 static int
3048 idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3049 {
3050 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3051 	idn_fin_t	fintype;
3052 	idn_finarg_t	finarg;
3053 	idn_finopt_t	finopt;
3054 	domainset_t	my_ready_set, ready_set;
3055 	idn_msgtype_t	mt;
3056 	idn_domain_t	*dp = &idn_domain[domid];
3057 	idn_xdcargs_t	aargs;
3058 	procname_t	proc = "idn_recv_fin";
3059 
3060 	ASSERT(IDN_SYNC_IS_LOCKED());
3061 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3062 
3063 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3064 
3065 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3066 	finarg    = GET_XARGS_FIN_ARG(xargs);
3067 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3068 	finopt    = GET_XARGS_FIN_OPT(xargs);
3069 
3070 	CLR_XARGS(aargs);
3071 
3072 	if (msg & IDNP_NACK) {
3073 		PR_PROTO("%s:%d: received NACK (type = %s)\n",
3074 		    proc, domid, idnnack_str[xargs[0]]);
3075 	} else {
3076 		PR_PROTO("%s:%d: fintype = %s, finopt = %s, "
3077 		    "finarg = %s, ready_set = 0x%x\n",
3078 		    proc, domid, idnfin_str[fintype],
3079 		    idnfinopt_str[finopt],
3080 		    idnfinarg_str[finarg], ready_set);
3081 	}
3082 
3083 	if (!(msg & IDNP_NACK) && (fintype == IDNFIN_QUERY)) {
3084 		domainset_t	query_set;
3085 
3086 		query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
3087 		    ready_set, IDNSYNC_REG_REG);
3088 
3089 		my_ready_set = ~idn.domset.ds_connected |
3090 		    idn.domset.ds_ready_off;
3091 
3092 		if (msg & IDNP_MSGTYPE_MASK) {
3093 			mt.mt_mtype = IDNP_ACK;
3094 			mt.mt_atype = IDNP_FIN;
3095 			SET_XARGS_FIN_TYPE(aargs, fintype);
3096 			SET_XARGS_FIN_ARG(aargs, finarg);
3097 			SET_XARGS_FIN_DOMSET(aargs, my_ready_set);
3098 			SET_XARGS_FIN_OPT(aargs, IDNFIN_OPT_NONE);
3099 			SET_XARGS_FIN_MASTER(aargs, NIL_FIN_MASTER);
3100 			idn_send_acknack(domid, &mt, aargs);
3101 		}
3102 
3103 		if (query_set) {
3104 			uint_t	token;
3105 
3106 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
3107 			idn_retry_submit(idn_retry_query, NULL, token,
3108 			    idn_msg_retrytime[(int)IDNRETRY_FINQ]);
3109 		}
3110 
3111 		return (0);
3112 	}
3113 
3114 	if (dp->dxp != &xphase_fin) {
3115 		uint_t	token;
3116 
3117 		if (IDNDS_IS_CLOSED(dp)) {
3118 			PR_PROTO("%s:%d: domain already closed (%s)\n",
3119 			    proc, domid, idnds_str[dp->dstate]);
3120 			if (msg & IDNP_MSGTYPE_MASK) {
3121 				/*
3122 				 * fin or fin+ack.
3123 				 */
3124 				mt.mt_mtype = IDNP_NACK;
3125 				mt.mt_atype = msg;
3126 				SET_XARGS_NACK_TYPE(aargs, IDNNACK_NOCONN);
3127 				idn_send_acknack(domid, &mt, aargs);
3128 			}
3129 			return (0);
3130 		}
3131 		dp->dfin_sync = IDNDS_SYNC_TYPE(dp);
3132 
3133 		/*
3134 		 * Need to do some clean-up ala idn_disconnect().
3135 		 *
3136 		 * Terminate any outstanding commands that were
3137 		 * targeted towards this domain.
3138 		 */
3139 		idn_terminate_cmd(domid, ECANCELED);
3140 
3141 		/*
3142 		 * Terminate any and all retries that may have
3143 		 * outstanding for this domain.
3144 		 */
3145 		token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
3146 		(void) idn_retry_terminate(token);
3147 
3148 		/*
3149 		 * Stop all outstanding message timers for
3150 		 * this guy.
3151 		 */
3152 		IDN_MSGTIMER_STOP(domid, 0, 0);
3153 
3154 		dp->dxp = &xphase_fin;
3155 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
3156 	}
3157 
3158 	if (msg & IDNP_NACK) {
3159 		idn_nack_t	nack;
3160 
3161 		nack = GET_XARGS_NACK_TYPE(xargs);
3162 		if (nack == IDNNACK_NOCONN) {
3163 			/*
3164 			 * We're trying to FIN with somebody we're
3165 			 * already disconnected from.  Need to
3166 			 * speed this guy through.
3167 			 */
3168 			DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3169 			(void) idn_sync_register(domid, IDNSYNC_DISCONNECT,
3170 			    DOMAINSET_ALL, IDNSYNC_REG_REG);
3171 			ready_set = (uint_t)DOMAINSET_ALL;
3172 			/*
3173 			 * Need to transform message to allow us to
3174 			 * pass this guy right through and not waste time
3175 			 * talking to him.
3176 			 */
3177 			IDN_FSTATE_TRANSITION(dp, IDNFIN_FORCE_HARD);
3178 
3179 			switch (dp->dstate) {
3180 			case IDNDS_FIN_PEND:
3181 				mtp->mt_mtype = 0;
3182 				mtp->mt_atype = 0;
3183 				break;
3184 
3185 			case IDNDS_FIN_SENT:
3186 				mtp->mt_mtype = IDNP_FIN | IDNP_ACK;
3187 				mtp->mt_atype = 0;
3188 				break;
3189 
3190 			case IDNDS_FIN_RCVD:
3191 				mtp->mt_mtype = IDNP_ACK;
3192 				mtp->mt_atype = IDNP_FIN | IDNP_ACK;
3193 				break;
3194 
3195 			default:
3196 #ifdef DEBUG
3197 				cmn_err(CE_PANIC,
3198 				    "%s:%d: UNEXPECTED state = %s",
3199 				    proc, domid,
3200 				    idnds_str[dp->dstate]);
3201 #endif /* DEBUG */
3202 				break;
3203 			}
3204 		}
3205 		fintype = (uint_t)dp->dfin;
3206 		finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3207 		    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3208 
3209 		CLR_XARGS(xargs);
3210 		SET_XARGS_FIN_TYPE(xargs, fintype);
3211 		SET_XARGS_FIN_ARG(xargs, finarg);
3212 		SET_XARGS_FIN_DOMSET(xargs, ready_set);
3213 		SET_XARGS_FIN_OPT(xargs, finopt);
3214 		SET_XARGS_FIN_MASTER(xargs, NIL_FIN_MASTER);
3215 	}
3216 
3217 	idn_xphase_transition(domid, mtp, xargs);
3218 
3219 	return (0);
3220 }
3221 
3222 /*ARGSUSED1*/
3223 static void
3224 idn_retry_fin(uint_t token, void *arg)
3225 {
3226 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
3227 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3228 	uint_t		finmaster;
3229 	idn_domain_t	*dp = &idn_domain[domid];
3230 	idn_xdcargs_t	xargs;
3231 	idn_finopt_t	finopt;
3232 	procname_t	proc = "idn_retry_fin";
3233 
3234 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_FIN);
3235 
3236 	IDN_SYNC_LOCK();
3237 	IDN_DLOCK_EXCL(domid);
3238 
3239 	if (dp->dxp != &xphase_fin) {
3240 		PR_PROTO("%s:%d: dxp(0x%p) != xstate_fin(0x%p)...bailing\n",
3241 		    proc, domid, dp->dxp, &xphase_fin);
3242 		IDN_DUNLOCK(domid);
3243 		IDN_SYNC_UNLOCK();
3244 		return;
3245 	}
3246 
3247 	if (dp->dxstate != IDNXS_PEND) {
3248 		PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
3249 		    proc, domid, idnxs_str[dp->dxstate],
3250 		    idnxs_str[IDNXS_PEND]);
3251 		IDN_DUNLOCK(domid);
3252 		IDN_SYNC_UNLOCK();
3253 		return;
3254 	}
3255 
3256 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3257 	    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3258 
3259 	CLR_XARGS(xargs);
3260 	SET_XARGS_FIN_TYPE(xargs, dp->dfin);
3261 	/*LINTED*/
3262 	SET_XARGS_FIN_ARG(xargs, IDNFIN_ARG_NONE);
3263 	SET_XARGS_FIN_OPT(xargs, finopt);
3264 	SET_XARGS_FIN_DOMSET(xargs, 0);		/* unused when msg == 0 */
3265 	IDN_GLOCK_SHARED();
3266 	new_masterid = IDN_GET_NEW_MASTERID();
3267 	IDN_GUNLOCK();
3268 	if (new_masterid != IDN_NIL_DOMID)
3269 		new_cpuid = idn_domain[new_masterid].dcpu;
3270 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3271 	SET_XARGS_FIN_MASTER(xargs, finmaster);
3272 
3273 	idn_xphase_transition(domid, NULL, xargs);
3274 
3275 	IDN_DUNLOCK(domid);
3276 	IDN_SYNC_UNLOCK();
3277 }
3278 
3279 static int
3280 idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3281 {
3282 	idn_domain_t	*dp = &idn_domain[domid];
3283 	idn_fin_t	fintype;
3284 	idn_finopt_t	finopt;
3285 	idn_finarg_t	finarg;
3286 	int		ready;
3287 	int		finmasterid;
3288 	int		fincpuid;
3289 	uint_t		finmaster;
3290 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3291 	domainset_t	query_set, ready_set, conn_set;
3292 	domainset_t	my_ready_set, shutdown_set;
3293 	procname_t	proc = "idn_check_fin_pend";
3294 
3295 	ASSERT(IDN_SYNC_IS_LOCKED());
3296 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3297 
3298 	if (msg & IDNP_NACK)
3299 		return (0);
3300 
3301 	if ((dp->dstate == IDNDS_FIN_PEND) && (msg & IDNP_MSGTYPE_MASK) &&
3302 	    (msg & IDNP_ACK))		/* fin+ack */
3303 		return (1);
3304 
3305 	query_set = 0;
3306 
3307 	if (!DOMAIN_IN_SET(idn.domset.ds_trans_off, domid)) {
3308 		/*
3309 		 * Can't remove domain from ds_connected yet,
3310 		 * since he's still officially connected until
3311 		 * we get an ACK from him.
3312 		 */
3313 		DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
3314 		DOMAINSET_ADD(idn.domset.ds_trans_off, domid);
3315 	}
3316 
3317 	IDN_GLOCK_SHARED();
3318 	conn_set = (idn.domset.ds_connected | idn.domset.ds_trans_on) &
3319 	    ~idn.domset.ds_trans_off;
3320 	if ((idn.state == IDNGS_DISCONNECT) ||
3321 	    (idn.state == IDNGS_RECONFIG) ||
3322 	    (domid == IDN_GET_MASTERID()) || !conn_set) {
3323 		/*
3324 		 * If we're disconnecting, reconfiguring,
3325 		 * unlinking from the master, or unlinking
3326 		 * the last of our connections, then we need
3327 		 * to shutdown all the channels.
3328 		 */
3329 		shutdown_set = DOMAINSET_ALL;
3330 	} else {
3331 		shutdown_set = DOMAINSET(domid);
3332 	}
3333 	IDN_GUNLOCK();
3334 
3335 	idn_shutdown_datapath(shutdown_set, (dp->dfin == IDNFIN_FORCE_HARD));
3336 
3337 	IDN_GLOCK_EXCL();
3338 	/*
3339 	 * Remap the SMR back to our local space if the remote
3340 	 * domain going down is the master.  We do this now before
3341 	 * flushing caches.  This will help guarantee that any
3342 	 * accidental accesses to the SMR after the cache flush
3343 	 * will only go to local memory.
3344 	 */
3345 	if ((domid == IDN_GET_MASTERID()) && (idn.smr.rempfn != PFN_INVALID)) {
3346 		PR_PROTO("%s:%d: deconfiging CURRENT MASTER - SMR remap\n",
3347 		    proc, domid);
3348 		IDN_DLOCK_EXCL(idn.localid);
3349 		/*
3350 		 * We're going to remap the SMR,
3351 		 * so gotta blow away our local
3352 		 * pointer to the mbox table.
3353 		 */
3354 		idn_domain[idn.localid].dmbox.m_tbl = NULL;
3355 		IDN_DUNLOCK(idn.localid);
3356 
3357 		idn.smr.rempfn = PFN_INVALID;
3358 		idn.smr.rempfnlim = PFN_INVALID;
3359 
3360 		smr_remap(&kas, idn.smr.vaddr, idn.smr.locpfn, IDN_SMR_SIZE);
3361 	}
3362 	IDN_GUNLOCK();
3363 
3364 	if (DOMAIN_IN_SET(idn.domset.ds_flush, domid)) {
3365 		idnxf_flushall_ecache();
3366 		CHECKPOINT_CLOSED(IDNSB_CHKPT_CACHE, dp->dhw.dh_boardset, 2);
3367 		DOMAINSET_DEL(idn.domset.ds_flush, domid);
3368 	}
3369 
3370 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3371 	finarg    = GET_XARGS_FIN_ARG(xargs);
3372 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3373 	finopt    = GET_XARGS_FIN_OPT(xargs);
3374 
3375 	ASSERT(fintype != IDNFIN_QUERY);
3376 	if (!VALID_FIN(fintype)) {
3377 		/*
3378 		 * If for some reason remote domain
3379 		 * sent us an invalid FIN type,
3380 		 * override it to a  NORMAL fin.
3381 		 */
3382 		PR_PROTO("%s:%d: WARNING invalid fintype (%d) -> %s(%d)\n",
3383 		    proc, domid, (int)fintype,
3384 		    idnfin_str[IDNFIN_NORMAL], (int)IDNFIN_NORMAL);
3385 		fintype = IDNFIN_NORMAL;
3386 	}
3387 
3388 	if (!VALID_FINOPT(finopt)) {
3389 		PR_PROTO("%s:%d: WARNING invalid finopt (%d) -> %s(%d)\n",
3390 		    proc, domid, (int)finopt,
3391 		    idnfinopt_str[IDNFIN_OPT_UNLINK],
3392 		    (int)IDNFIN_OPT_UNLINK);
3393 		finopt = IDNFIN_OPT_UNLINK;
3394 	}
3395 
3396 	finmaster = GET_XARGS_FIN_MASTER(xargs);
3397 	finmasterid = FIN_MASTER_DOMID(finmaster);
3398 	fincpuid = FIN_MASTER_CPUID(finmaster);
3399 
3400 	if ((finarg != IDNFIN_ARG_NONE) &&
3401 	    !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
3402 		idnsb_error_t	idnerr;
3403 
3404 		INIT_IDNKERR(&idnerr);
3405 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
3406 		SET_IDNKERR_IDNERR(&idnerr, FINARG2IDNKERR(finarg));
3407 		SET_IDNKERR_PARAM0(&idnerr, domid);
3408 
3409 		if (IDNFIN_ARG_IS_FATAL(finarg)) {
3410 			finopt = IDNFIN_OPT_UNLINK;
3411 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
3412 			DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
3413 
3414 			if (idn.domset.ds_connected == 0) {
3415 				domainset_t	domset;
3416 
3417 				IDN_GLOCK_EXCL();
3418 				domset = ~idn.domset.ds_relink;
3419 				if (idn.domset.ds_relink == 0) {
3420 					IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
3421 				}
3422 				domset &= ~idn.domset.ds_hitlist;
3423 				/*
3424 				 * The primary domain we were trying to
3425 				 * connect to fin'd us with a fatal argument.
3426 				 * Something isn't cool in our IDN environment,
3427 				 * e.g. corrupted SMR or non-compatible CONFIG
3428 				 * parameters.  In any case we need to dismantle
3429 				 * ourselves completely.
3430 				 */
3431 				IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3432 				IDN_GUNLOCK();
3433 				IDN_DUNLOCK(domid);
3434 
3435 				DOMAINSET_DEL(domset, idn.localid);
3436 				DOMAINSET_DEL(domset, domid);
3437 
3438 				idn_update_op(IDNOP_ERROR, DOMAINSET_ALL,
3439 				    &idnerr);
3440 
3441 				PR_HITLIST("%s:%d: unlink_domainset(%x) "
3442 				    "due to CFG error (relink=%x, "
3443 				    "hitlist=%x)\n", proc, domid, domset,
3444 				    idn.domset.ds_relink,
3445 				    idn.domset.ds_hitlist);
3446 
3447 				idn_unlink_domainset(domset, IDNFIN_NORMAL,
3448 				    finarg, IDNFIN_OPT_UNLINK, BOARDSET_ALL);
3449 				IDN_DLOCK_EXCL(domid);
3450 			}
3451 			PR_HITLIST("%s:%d: CFG error, (conn=%x, relink=%x, "
3452 			    "hitlist=%x)\n",
3453 			    proc, domid, idn.domset.ds_connected,
3454 			    idn.domset.ds_relink, idn.domset.ds_hitlist);
3455 		}
3456 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
3457 	}
3458 
3459 	if ((finmasterid != IDN_NIL_DOMID) && (!VALID_DOMAINID(finmasterid) ||
3460 	    DOMAIN_IN_SET(idn.domset.ds_hitlist, domid))) {
3461 		PR_HITLIST("%s:%d: finmasterid = %d -> -1, relink=%x, "
3462 		    "hitlist=%x\n",
3463 		    proc, domid, finmasterid, idn.domset.ds_relink,
3464 		    idn.domset.ds_hitlist);
3465 		PR_PROTO("%s:%d: WARNING invalid finmasterid (%d) -> -1\n",
3466 		    proc, domid, finmasterid);
3467 		finmasterid = IDN_NIL_DOMID;
3468 	}
3469 
3470 	IDN_GLOCK_EXCL();
3471 
3472 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
3473 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
3474 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
3475 		    idn.domset.ds_relink);
3476 	} else {
3477 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
3478 		DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
3479 	}
3480 
3481 	if ((domid == IDN_GET_NEW_MASTERID()) &&
3482 	    !DOMAIN_IN_SET(idn.domset.ds_relink, domid)) {
3483 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3484 	}
3485 
3486 	if ((idn.state != IDNGS_DISCONNECT) && (idn.state != IDNGS_RECONFIG) &&
3487 	    (domid == IDN_GET_MASTERID())) {
3488 		domainset_t	dis_set, master_candidates;
3489 
3490 		IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
3491 
3492 		IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
3493 		IDN_GUNLOCK();
3494 
3495 		if ((finmasterid != IDN_NIL_DOMID) &&
3496 		    (finmasterid != idn.localid)) {
3497 			if (finmasterid != domid)
3498 				IDN_DLOCK_EXCL(finmasterid);
3499 			if (idn_open_domain(finmasterid, fincpuid, 0) < 0) {
3500 				cmn_err(CE_WARN,
3501 				    "IDN: 205: (%s) failed to "
3502 				    "open-domain(%d,%d)",
3503 				    proc, finmasterid, fincpuid);
3504 				if (finmasterid != domid)
3505 					IDN_DUNLOCK(finmasterid);
3506 				finmasterid = IDN_NIL_DOMID;
3507 			}
3508 			if (finmasterid != domid)
3509 				IDN_DUNLOCK(finmasterid);
3510 		}
3511 
3512 		IDN_GLOCK_EXCL();
3513 		if (finmasterid == IDN_NIL_DOMID) {
3514 			int	m;
3515 
3516 			master_candidates = idn.domset.ds_trans_on |
3517 			    idn.domset.ds_connected |
3518 			    idn.domset.ds_relink;
3519 			master_candidates &= ~(idn.domset.ds_trans_off &
3520 			    ~idn.domset.ds_relink);
3521 			DOMAINSET_DEL(master_candidates, domid);
3522 			/*
3523 			 * Local domain gets to participate also.
3524 			 */
3525 			DOMAINSET_ADD(master_candidates, idn.localid);
3526 
3527 			m = idn_select_candidate(master_candidates);
3528 			IDN_SET_NEW_MASTERID(m);
3529 		} else {
3530 			IDN_SET_NEW_MASTERID(finmasterid);
3531 		}
3532 		IDN_GUNLOCK();
3533 
3534 		dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
3535 		DOMAINSET_DEL(dis_set, domid);
3536 
3537 		idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
3538 		    IDNFIN_OPT_RELINK, BOARDSET_ALL);
3539 	} else {
3540 		IDN_GUNLOCK();
3541 	}
3542 
3543 	/*
3544 	 * My local ready-set are those domains from which I
3545 	 * have confirmed no datapaths exist.
3546 	 */
3547 	my_ready_set = ~idn.domset.ds_connected;
3548 
3549 	switch (dp->dfin) {
3550 	case IDNFIN_NORMAL:
3551 	case IDNFIN_FORCE_SOFT:
3552 	case IDNFIN_FORCE_HARD:
3553 		if (fintype < dp->dfin) {
3554 			/*
3555 			 * Remote domain has requested a
3556 			 * FIN of lower priority than what
3557 			 * we're currently running.  Just
3558 			 * leave the priority where it is.
3559 			 */
3560 			break;
3561 		}
3562 		/*FALLTHROUGH*/
3563 
3564 	default:
3565 		IDN_FSTATE_TRANSITION(dp, fintype);
3566 		break;
3567 	}
3568 
3569 	ASSERT(dp->dfin_sync != IDNFIN_SYNC_OFF);
3570 
3571 	if (msg == 0) {
3572 		/*
3573 		 * Local domain is initiating a FIN sequence
3574 		 * to remote domid.  Note that remote domain
3575 		 * remains in ds_connected even though he's
3576 		 * in thet ready-set from the local domain's
3577 		 * perspective.  We can't remove him from
3578 		 * ds_connected until we get a confirmed message
3579 		 * from him indicating he has ceased communication.
3580 		 */
3581 		ready_set = my_ready_set;
3582 	} else {
3583 		/*
3584 		 * Remote domain initiated a FIN sequence
3585 		 * to local domain.  This implies that he
3586 		 * has shutdown his datapath to us.  Since
3587 		 * we shutdown our datapath to him, we're
3588 		 * effectively now in his ready-set.
3589 		 */
3590 		DOMAINSET_ADD(ready_set, idn.localid);
3591 		/*
3592 		 * Since we know both sides of the connection
3593 		 * have ceased, this remote domain is effectively
3594 		 * considered disconnected.
3595 		 */
3596 		DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3597 	}
3598 
3599 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3600 		/*
3601 		 * If we're doing a hard disconnect
3602 		 * of this domain then we want to
3603 		 * blow straight through and not
3604 		 * waste time trying to talk to the
3605 		 * remote domain nor to domains we
3606 		 * believe are AWOL.  Although we will
3607 		 * try and do it cleanly with
3608 		 * everybody else.
3609 		 */
3610 		DOMAINSET_ADD(my_ready_set, domid);
3611 		my_ready_set |= idn.domset.ds_awol;
3612 		ready_set = DOMAINSET_ALL;
3613 
3614 	} else if (dp->dfin_sync == IDNFIN_SYNC_NO) {
3615 		/*
3616 		 * If we're not fin'ing this domain
3617 		 * synchronously then the only
3618 		 * expected domain set is himself.
3619 		 */
3620 		ready_set |= ~DOMAINSET(domid);
3621 		my_ready_set |= ~DOMAINSET(domid);
3622 	}
3623 
3624 	if (dp->dsync.s_cmd != IDNSYNC_DISCONNECT) {
3625 		idn_sync_exit(domid, IDNSYNC_CONNECT);
3626 		idn_sync_enter(domid, IDNSYNC_DISCONNECT, DOMAINSET_ALL,
3627 		    my_ready_set, idn_xstate_transfunc,	(void *)IDNP_FIN);
3628 	}
3629 
3630 	query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT, ready_set,
3631 	    IDNSYNC_REG_REG);
3632 
3633 	/*
3634 	 * No need to query this domain as he's already
3635 	 * in the FIN sequence.
3636 	 */
3637 	DOMAINSET_DEL(query_set, domid);
3638 
3639 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
3640 	if (ready) {
3641 		DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
3642 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
3643 	}
3644 
3645 	if (query_set) {
3646 		int	d;
3647 
3648 		my_ready_set = idn.domset.ds_ready_off |
3649 		    ~idn.domset.ds_connected;
3650 
3651 		for (d = 0; d < MAX_DOMAINS; d++) {
3652 			if (!DOMAIN_IN_SET(query_set, d))
3653 				continue;
3654 
3655 			dp = &idn_domain[d];
3656 
3657 			IDN_DLOCK_EXCL(d);
3658 
3659 			if (dp->dsync.s_cmd == IDNSYNC_DISCONNECT) {
3660 				IDN_DUNLOCK(d);
3661 				continue;
3662 			}
3663 
3664 			IDN_SYNC_QUERY_UPDATE(domid, d);
3665 
3666 			idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
3667 			    IDNFIN_OPT_NONE, my_ready_set, NIL_FIN_MASTER);
3668 			IDN_DUNLOCK(d);
3669 		}
3670 	}
3671 
3672 	return (!msg ? 0 : (ready ? 0 : 1));
3673 }
3674 
3675 /*ARGSUSED*/
3676 static void
3677 idn_error_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3678 {
3679 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
3680 	uint_t	token;
3681 
3682 	ASSERT(IDN_SYNC_IS_LOCKED());
3683 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3684 
3685 	/*
3686 	 * Don't communicate with domains that
3687 	 * we're forcing a hard disconnect.
3688 	 */
3689 	if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
3690 	    (msg & IDNP_MSGTYPE_MASK)) {
3691 		idn_msgtype_t	mt;
3692 		idn_xdcargs_t	nargs;
3693 
3694 		mt.mt_mtype = IDNP_NACK;
3695 		mt.mt_atype = msg;
3696 		mt.mt_cookie = mtp->mt_cookie;
3697 		CLR_XARGS(nargs);
3698 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
3699 		idn_send_acknack(domid, &mt, nargs);
3700 	}
3701 
3702 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3703 	idn_retry_submit(idn_retry_fin, NULL, token,
3704 	    idn_msg_retrytime[(int)IDNRETRY_FIN]);
3705 }
3706 
/*
 * Action routine for the FIN-pending xstate.  Either initiates the
 * FIN exchange with the remote domain (msg == 0), responds to an
 * incoming fin with a fin+ack, fast-paths the transition for a hard
 * disconnect, or reschedules a retry on a NACK.
 *
 * Caller must hold the sync lock and the domain lock.
 */
static void
idn_action_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	idn_domain_t	*dp = &idn_domain[domid];
	domainset_t	my_ready_set;
	idn_finopt_t	finopt;
	idn_finarg_t	finarg;
	uint_t		finmaster;
	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
	uint_t		msg = mtp ? mtp->mt_mtype : 0;
	idn_msgtype_t	mt;

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_HELD(domid));

	/*
	 * Our ready-set: domains already registered ready, those
	 * confirmed ready-off, and everybody not connected.
	 */
	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_off |
	    ~idn.domset.ds_connected;

	ASSERT(xargs[0] != (uint_t)IDNFIN_QUERY);

	finarg = GET_XARGS_FIN_ARG(xargs);
	/* Relink if this domain is slated to come back, else unlink. */
	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
	    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;

	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;

	/*
	 * Advertise the currently elected new master (if any) in the
	 * outgoing FIN so both sides converge on the same choice.
	 */
	IDN_GLOCK_SHARED();
	new_masterid = IDN_GET_NEW_MASTERID();
	IDN_GUNLOCK();
	if (new_masterid != IDN_NIL_DOMID)
		new_cpuid = idn_domain[new_masterid].dcpu;
	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);

	if (dp->dfin == IDNFIN_FORCE_HARD) {
		ASSERT(IDN_DLOCK_IS_EXCL(domid));

		/*
		 * Hard disconnect: don't actually talk to the remote
		 * domain - feed the expected message sequence straight
		 * back into the state machine to push it through.
		 */
		if (!msg) {
			mt.mt_mtype = IDNP_FIN | IDNP_ACK;
			mt.mt_atype = 0;
		} else {
			mt.mt_mtype = IDNP_ACK;
			mt.mt_atype = IDNP_FIN | IDNP_ACK;
		}
		idn_xphase_transition(domid, &mt, xargs);
	} else if (!msg) {
		/* Locally initiated: send the first FIN. */
		idn_send_fin(domid, NULL, dp->dfin, finarg, finopt,
		    my_ready_set, finmaster);
	} else if ((msg & IDNP_ACKNACK_MASK) == 0) {
		/*
		 * fin
		 */
		mt.mt_mtype = IDNP_FIN | IDNP_ACK;
		mt.mt_atype = 0;
		idn_send_fin(domid, &mt, dp->dfin, finarg, finopt,
		    my_ready_set, finmaster);
	} else {
		uint_t	token;
		/*
		 * nack - retry
		 */
		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
		idn_retry_submit(idn_retry_fin, NULL, token,
		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
	}
}
3772 
/*
 * Check routine run after our FIN has been sent and the remote
 * domain's response arrives.  Validates the remote FIN parameters,
 * records the relink disposition, escalates our fin type if the
 * remote requested a higher-priority one, and registers the remote
 * ready-set with the DISCONNECT sync subsystem, querying laggards.
 *
 * Returns 0 when all expected domains have checked in (ready),
 * 1 when we must continue waiting.
 *
 * Caller must hold the sync lock and the domain lock (EXCL).
 */
static int
idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	int		ready;
	uint_t		msg = mtp ? mtp->mt_mtype : 0;
	idn_fin_t	fintype;
	idn_finopt_t	finopt;
	idn_domain_t	*dp = &idn_domain[domid];
	domainset_t	query_set, ready_set;

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	if (msg & IDNP_NACK)
		return (0);

	fintype   = GET_XARGS_FIN_TYPE(xargs);
	ready_set = GET_XARGS_FIN_DOMSET(xargs);
	finopt    = GET_XARGS_FIN_OPT(xargs);

	ASSERT(fintype != IDNFIN_QUERY);
	if (!VALID_FIN(fintype)) {
		/*
		 * If for some reason remote domain
		 * sent us an invalid FIN type,
		 * override it to a  NORMAL fin.
		 */
		fintype = IDNFIN_NORMAL;
	}

	if (!VALID_FINOPT(finopt)) {
		finopt = IDNFIN_OPT_UNLINK;
	}
	/* Track whether this domain intends to relink afterwards. */
	IDN_GLOCK_SHARED();
	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
		DOMAINSET_ADD(idn.domset.ds_relink, domid);
		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
		    idn.domset.ds_relink);
	} else {
		DOMAINSET_DEL(idn.domset.ds_relink, domid);
	}
	IDN_GUNLOCK();

	/*
	 * Escalate to the remote's fin type if it is of equal or
	 * higher priority than the one we're currently running.
	 */
	switch (dp->dfin) {
	case IDNFIN_NORMAL:
	case IDNFIN_FORCE_SOFT:
	case IDNFIN_FORCE_HARD:
		if (fintype < dp->dfin) {
			/*
			 * Remote domain has requested a
			 * FIN of lower priority than what
			 * we're current running.  Just
			 * leave the priority where it is.
			 */
			break;
		}
		/*FALLTHROUGH*/

	default:
		IDN_FSTATE_TRANSITION(dp, fintype);
		break;
	}

	if (dp->dfin == IDNFIN_FORCE_HARD) {
		/*
		 * If we're doing a hard disconnect
		 * of this domain then we want to
		 * blow straight through and not
		 * waste time trying to talk to the
		 * remote domain.  By registering him
		 * as ready with respect to all
		 * possible domains he'll transition
		 * immediately.  Note that we'll still
		 * try and do it coherently with
		 * other domains to which we're connected.
		 */
		ready_set = DOMAINSET_ALL;
	} else {
		DOMAINSET_ADD(ready_set, idn.localid);
	}

	DOMAINSET_ADD(idn.domset.ds_ready_off, domid);

	query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
	    ready_set, IDNSYNC_REG_REG);
	/*
	 * No need to query this domain as he's already
	 * in the FIN sequence.
	 */
	DOMAINSET_DEL(query_set, domid);

	/* Ready once every expected domain has registered. */
	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
	if (ready) {
		DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
		DOMAINSET_DEL(idn.domset.ds_connected, domid);
	}

	/*
	 * Fan a FIN query out to any domain that hasn't yet entered
	 * the DISCONNECT sync sequence.
	 */
	if (query_set) {
		int		d;
		domainset_t	my_ready_set;

		my_ready_set = idn.domset.ds_ready_off |
		    ~idn.domset.ds_connected;

		for (d = 0; d < MAX_DOMAINS; d++) {
			if (!DOMAIN_IN_SET(query_set, d))
				continue;

			dp = &idn_domain[d];

			IDN_DLOCK_EXCL(d);

			if (dp->dsync.s_cmd == IDNSYNC_DISCONNECT) {
				IDN_DUNLOCK(d);
				continue;
			}

			IDN_SYNC_QUERY_UPDATE(domid, d);

			idn_send_fin(d, NULL, IDNFIN_QUERY, IDNFIN_ARG_NONE,
			    IDNFIN_OPT_NONE, my_ready_set, NIL_FIN_MASTER);
			IDN_DUNLOCK(d);
		}
	}

	return ((ready > 0) ? 0 : 1);
}
3900 
3901 /*ARGSUSED*/
3902 static void
3903 idn_error_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3904 {
3905 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
3906 	uint_t	token;
3907 
3908 	ASSERT(IDN_SYNC_IS_LOCKED());
3909 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3910 
3911 	/*
3912 	 * Don't communicate with domains that
3913 	 * we're forcing a hard disconnect.
3914 	 */
3915 	if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
3916 	    (msg & IDNP_MSGTYPE_MASK)) {
3917 		idn_msgtype_t	mt;
3918 		idn_xdcargs_t	nargs;
3919 
3920 		mt.mt_mtype = IDNP_NACK;
3921 		mt.mt_atype = msg;
3922 		mt.mt_cookie = mtp->mt_cookie;
3923 		CLR_XARGS(nargs);
3924 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
3925 		idn_send_acknack(domid, &mt, nargs);
3926 	}
3927 
3928 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3929 	idn_retry_submit(idn_retry_fin, NULL, token,
3930 	    idn_msg_retrytime[(int)IDNRETRY_FIN]);
3931 }
3932 
/*
 * Action routine for the FIN-sent xstate.  Handles the remote
 * domain's reply to our FIN: responds to a bare fin with fin+ack,
 * acknowledges a fin+ack, fast-paths hard disconnects through the
 * state machine, and reschedules a retry on a NACK.
 *
 * Caller must hold the sync lock and the domain lock (EXCL).
 */
static void
idn_action_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	uint_t		msg = mtp ? mtp->mt_mtype : 0;
	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
	uint_t		finmaster;
	idn_msgtype_t	mt;
	idn_finopt_t	finopt;
	idn_finarg_t	finarg;
	domainset_t	my_ready_set;
	idn_domain_t	*dp = &idn_domain[domid];

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;

	/* Relink if this domain is slated to come back, else unlink. */
	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
	    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;

	finarg = GET_XARGS_FIN_ARG(xargs);

	/*
	 * Our ready-set: domains already registered ready, those
	 * confirmed ready-off, and everybody not connected.
	 */
	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_off |
	    ~idn.domset.ds_connected;

	/*
	 * Advertise the currently elected new master (if any) in the
	 * outgoing response so both sides converge on the same choice.
	 */
	IDN_GLOCK_SHARED();
	new_masterid = IDN_GET_NEW_MASTERID();
	IDN_GUNLOCK();
	if (new_masterid != IDN_NIL_DOMID)
		new_cpuid = idn_domain[new_masterid].dcpu;
	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);

	if ((msg & IDNP_ACKNACK_MASK) == 0) {
		/*
		 * fin
		 */
		if (dp->dfin == IDNFIN_FORCE_HARD) {
			/*
			 * Hard disconnect: don't talk to the remote -
			 * feed the expected sequence straight back
			 * into the state machine.
			 */
			mt.mt_mtype = IDNP_ACK;
			mt.mt_atype = IDNP_FIN | IDNP_ACK;
			idn_xphase_transition(domid, &mt, xargs);
		} else {
			mt.mt_mtype = IDNP_FIN | IDNP_ACK;
			mt.mt_atype = 0;
			idn_send_fin(domid, &mt, dp->dfin, finarg, finopt,
			    my_ready_set, finmaster);
		}
	} else if (msg & IDNP_MSGTYPE_MASK) {
		/*
		 * fin+ack
		 */
		if (dp->dfin != IDNFIN_FORCE_HARD) {
			idn_xdcargs_t	fargs;

			mt.mt_mtype = IDNP_ACK;
			mt.mt_atype = msg;
			CLR_XARGS(fargs);
			SET_XARGS_FIN_TYPE(fargs, dp->dfin);
			SET_XARGS_FIN_ARG(fargs, finarg);
			SET_XARGS_FIN_DOMSET(fargs, my_ready_set);
			SET_XARGS_FIN_OPT(fargs, finopt);
			SET_XARGS_FIN_MASTER(fargs, finmaster);
			idn_send_acknack(domid, &mt, fargs);
		}
	} else {
		uint_t	token;
		/*
		 * nack - retry
		 */
		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
		idn_retry_submit(idn_retry_fin, NULL, token,
		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
	}
}
4006 
4007 /*ARGSUSED*/
4008 static void
4009 idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
4010 {
4011 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
4012 
4013 	ASSERT(IDN_SYNC_IS_LOCKED());
4014 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4015 
4016 	if (msg & IDNP_NACK) {
4017 		uint_t	token;
4018 		/*
4019 		 * nack - retry.
4020 		 */
4021 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4022 		idn_retry_submit(idn_retry_fin, NULL, token,
4023 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
4024 	}
4025 }
4026 
4027 static void
4028 idn_final_fin(int domid)
4029 {
4030 	int		do_relink;
4031 	int		rv, d, new_masterid = IDN_NIL_DOMID;
4032 	idn_gstate_t	next_gstate;
4033 	domainset_t	relinkset;
4034 	uint_t		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4035 	idn_domain_t	*ldp, *dp = &idn_domain[domid];
4036 	procname_t	proc = "idn_final_fin";
4037 
4038 	ASSERT(IDN_SYNC_IS_LOCKED());
4039 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4040 	ASSERT(dp->dstate == IDNDS_DMAP);
4041 
4042 	(void) idn_retry_terminate(token);
4043 
4044 	dp->dxp = NULL;
4045 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
4046 
4047 	idn_sync_exit(domid, IDNSYNC_DISCONNECT);
4048 
4049 	DOMAINSET_DEL(idn.domset.ds_trans_off, domid);
4050 
4051 	do_relink = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ? 1 : 0;
4052 
4053 	/*
4054 	 * idn_deconfig will idn_close_domain.
4055 	 */
4056 	idn_deconfig(domid);
4057 
4058 	PR_PROTO("%s:%d: DISCONNECTED\n", proc, domid);
4059 
4060 	IDN_GLOCK_EXCL();
4061 	/*
4062 	 * It's important that this update-op occur within
4063 	 * the context of holding the glock(EXCL).  There is
4064 	 * still some additional state stuff to cleanup which
4065 	 * will be completed once the glock is dropped in
4066 	 * this flow.  Which means anybody that's doing a
4067 	 * SSI_INFO and waiting on glock will not actually
4068 	 * run until the clean-up is completed, which is what
4069 	 * we want.  Recall that a separate thread processes
4070 	 * the SSI_LINK/UNLINK calls and when they complete
4071 	 * (i.e. are awakened) they will immediately SSI_INFO
4072 	 * and we don't want them to prematurely pick up stale
4073 	 * information.
4074 	 */
4075 	idn_update_op(IDNOP_DISCONNECTED, DOMAINSET(domid), NULL);
4076 
4077 	ASSERT(idn.state != IDNGS_OFFLINE);
4078 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid));
4079 
4080 	if (domid == IDN_GET_MASTERID()) {
4081 		IDN_SET_MASTERID(IDN_NIL_DOMID);
4082 		dp->dvote.v.master = 0;
4083 	}
4084 
4085 	if ((domid == IDN_GET_NEW_MASTERID()) && !do_relink) {
4086 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
4087 	}
4088 
4089 	if (idn.state == IDNGS_RECONFIG)
4090 		new_masterid = IDN_GET_NEW_MASTERID();
4091 
4092 	if ((idn.domset.ds_trans_on | idn.domset.ds_trans_off |
4093 	    idn.domset.ds_relink) == 0) {
4094 		PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
4095 		    proc, domid, idn.domset.ds_hitlist);
4096 		idn.domset.ds_hitlist = 0;
4097 	}
4098 
4099 	if (idn.domset.ds_connected || idn.domset.ds_trans_off) {
4100 		PR_PROTO("%s:%d: ds_connected = 0x%x, ds_trans_off = 0x%x\n",
4101 		    proc, domid, idn.domset.ds_connected,
4102 		    idn.domset.ds_trans_off);
4103 		IDN_GUNLOCK();
4104 		goto fin_done;
4105 	}
4106 
4107 	IDN_DLOCK_EXCL(idn.localid);
4108 	ldp = &idn_domain[idn.localid];
4109 
4110 	if (idn.domset.ds_trans_on != 0) {
4111 		ASSERT((idn.state != IDNGS_DISCONNECT) &&
4112 		    (idn.state != IDNGS_OFFLINE));
4113 
4114 		switch (idn.state) {
4115 		case IDNGS_CONNECT:
4116 			if (idn.localid == IDN_GET_MASTERID()) {
4117 				idn_master_deinit();
4118 				IDN_SET_MASTERID(IDN_NIL_DOMID);
4119 				ldp->dvote.v.master = 0;
4120 			}
4121 			/*FALLTHROUGH*/
4122 		case IDNGS_ONLINE:
4123 			next_gstate = idn.state;
4124 			break;
4125 
4126 		case IDNGS_RECONFIG:
4127 			if (idn.localid == IDN_GET_MASTERID()) {
4128 				idn_master_deinit();
4129 				IDN_SET_MASTERID(IDN_NIL_DOMID);
4130 				ldp->dvote.v.master = 0;
4131 			}
4132 			ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
4133 			next_gstate = IDNGS_CONNECT;
4134 			ldp->dvote.v.connected = 0;
4135 			/*
4136 			 * Need to do HWINIT since we won't
4137 			 * be transitioning through OFFLINE
4138 			 * which would normally be caught in
4139 			 * idn_check_nego() when we
4140 			 * initially go to CONNECT.
4141 			 */
4142 			IDN_PREP_HWINIT();
4143 			break;
4144 
4145 		case IDNGS_DISCONNECT:
4146 		case IDNGS_OFFLINE:
4147 			cmn_err(CE_WARN,
4148 			    "IDN: 211: disconnect domain %d, "
4149 			    "unexpected Gstate (%s)",
4150 			    domid, idngs_str[idn.state]);
4151 			IDN_DUNLOCK(idn.localid);
4152 			IDN_GUNLOCK();
4153 			goto fin_done;
4154 
4155 		default:
4156 			/*
4157 			 * XXX
4158 			 * Go into FATAL state?
4159 			 */
4160 			cmn_err(CE_PANIC,
4161 			    "IDN: 212: disconnect domain %d, "
4162 			    "bad Gstate (%d)",
4163 			    domid, idn.state);
4164 			/* not reached */
4165 			break;
4166 		}
4167 	} else {
4168 		if (idn.localid == IDN_GET_MASTERID()) {
4169 			idn_master_deinit();
4170 			IDN_SET_MASTERID(IDN_NIL_DOMID);
4171 			ldp->dvote.v.master = 0;
4172 		}
4173 		next_gstate = IDNGS_OFFLINE;
4174 		if (idn.domset.ds_relink == 0) {
4175 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
4176 		}
4177 	}
4178 	IDN_DUNLOCK(idn.localid);
4179 
4180 	/*
4181 	 * If we reach here we've effectively disconnected all
4182 	 * existing links, however new ones may be pending.
4183 	 */
4184 	PR_PROTO("%s:%d: ALL DISCONNECTED *****************\n", proc, domid);
4185 
4186 	IDN_GSTATE_TRANSITION(next_gstate);
4187 
4188 	ASSERT((idn.state == IDNGS_OFFLINE) ?
4189 	    (IDN_GET_MASTERID() == IDN_NIL_DOMID) : 1);
4190 
4191 	IDN_GUNLOCK();
4192 
4193 	/*
4194 	 * If we have no new masterid and yet there are relinkers
4195 	 * out there, then force us to attempt to link with one
4196 	 * of them.
4197 	 */
4198 	if ((new_masterid == IDN_NIL_DOMID) && idn.domset.ds_relink)
4199 		new_masterid = idn.localid;
4200 
4201 	if (new_masterid != IDN_NIL_DOMID) {
4202 		/*
4203 		 * If the local domain is the selected
4204 		 * master then we'll want to initiate
4205 		 * a link with one of the other candidates.
4206 		 * If not, then we want to initiate a link
4207 		 * with the master only.
4208 		 */
4209 		relinkset = (new_masterid == idn.localid) ?
4210 		    idn.domset.ds_relink : DOMAINSET(new_masterid);
4211 
4212 		DOMAINSET_DEL(relinkset, idn.localid);
4213 
4214 		for (d = 0; d < MAX_DOMAINS; d++) {
4215 			int	lock_held;
4216 
4217 			if (!DOMAIN_IN_SET(relinkset, d))
4218 				continue;
4219 
4220 			if (d == domid) {
4221 				do_relink = 0;
4222 				lock_held = 0;
4223 			} else {
4224 				IDN_DLOCK_EXCL(d);
4225 				lock_held = 1;
4226 			}
4227 
4228 			rv = idn_open_domain(d, -1, 0);
4229 			if (rv == 0) {
4230 				rv = idn_connect(d);
4231 				if (lock_held)
4232 					IDN_DUNLOCK(d);
4233 				/*
4234 				 * If we're able to kick off at
4235 				 * least one connect then that's
4236 				 * good enough for now.  The others
4237 				 * will fall into place normally.
4238 				 */
4239 				if (rv == 0)
4240 					break;
4241 			} else if (rv < 0) {
4242 				if (lock_held)
4243 					IDN_DUNLOCK(d);
4244 				cmn_err(CE_WARN,
4245 				    "IDN: 205: (%s.1) failed to "
4246 				    "open-domain(%d,%d)",
4247 				    proc, domid, -1);
4248 				DOMAINSET_DEL(idn.domset.ds_relink, d);
4249 			} else {
4250 				if (lock_held)
4251 					IDN_DUNLOCK(d);
4252 				PR_PROTO("%s:%d: failed to "
4253 				    "re-open domain %d "
4254 				    "(cpu %d) [rv = %d]\n",
4255 				    proc, domid, d, idn_domain[d].dcpu,
4256 				    rv);
4257 			}
4258 		}
4259 	}
4260 
4261 fin_done:
4262 	if (do_relink) {
4263 		ASSERT(IDN_DLOCK_IS_EXCL(domid));
4264 
4265 		rv = idn_open_domain(domid, -1, 0);
4266 		if (rv == 0) {
4267 			(void) idn_connect(domid);
4268 		} else if (rv < 0) {
4269 			cmn_err(CE_WARN,
4270 			    "IDN: 205: (%s.2) failed to "
4271 			    "open-domain(%d,%d)",
4272 			    proc, domid, -1);
4273 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
4274 		}
4275 	}
4276 }
4277 
/*
 * Exit handler for the FIN (disconnect) exchange with the given
 * domain: cancels any outstanding FIN retry, drops the domain from
 * the ready-to-go-offline set, resets the FIN xphase state machine
 * to PEND, and schedules a fresh FIN retry.
 *
 * domid   - index of the target domain in idn_domain[].
 * msgtype - message type that triggered the exit (debug output only).
 *
 * Entered and returns w/SYNC_LOCK & DLOCK (exclusive) held.
 */
static void
idn_exit_fin(int domid, uint_t msgtype)
{
	idn_domain_t	*dp = &idn_domain[domid];
	uint_t		token;
	procname_t	proc = "idn_exit_fin";
	STRING(str);

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	INUM2STR(msgtype, str);
	PR_PROTO("%s:%d: msgtype = 0x%x(%s)\n", proc, domid, msgtype, str);

	/*
	 * Terminate any FIN retry currently in flight for this domain;
	 * the same token is reused for the retry submitted below.
	 */
	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
	(void) idn_retry_terminate(token);

	/* Domain is no longer considered ready to transition off. */
	DOMAINSET_DEL(idn.domset.ds_ready_off, domid);

	/* Restart the FIN exchange from the PEND xstate. */
	dp->dxp = &xphase_fin;
	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);

	idn_retry_submit(idn_retry_fin, NULL, token,
	    idn_msg_retrytime[(int)IDNRETRY_FIN]);
}
4303 
4304 /*
4305  * Must return w/locks held.
4306  */
4307 static int
4308 idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
4309 {
4310 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
4311 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
4312 	idn_xphase_t	*xp;
4313 	idn_domain_t	*dp;
4314 	int		(*cfunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4315 	void		(*ffunc)(int);
4316 	void		(*afunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4317 	void		(*efunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4318 	void		(*xfunc)(int, uint_t);
4319 	int		err = 0;
4320 	uint_t		msgtype;
4321 	idn_xstate_t	o_xstate, n_xstate;
4322 	procname_t	proc = "idn_xphase_transition";
4323 	STRING(mstr);
4324 	STRING(astr);
4325 
4326 	ASSERT(IDN_SYNC_IS_LOCKED());
4327 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4328 
4329 	INUM2STR(msg, mstr);
4330 	INUM2STR(msgarg, astr);
4331 
4332 	dp = &idn_domain[domid];
4333 	if ((xp = dp->dxp) == NULL) {
4334 		PR_PROTO("%s:%d: WARNING: domain xsp is NULL (msg = %s, "
4335 		    "msgarg = %s) <<<<<<<<<<<<\n",
4336 		    proc, domid, mstr, astr);
4337 		return (-1);
4338 	}
4339 	o_xstate = dp->dxstate;
4340 
4341 	xfunc = xp->xt_exit;
4342 
4343 	if ((msgtype = (msg & IDNP_MSGTYPE_MASK)) == 0)
4344 		msgtype = msgarg & IDNP_MSGTYPE_MASK;
4345 
4346 	if ((o_xstate == IDNXS_PEND) && msg &&
4347 	    ((msg & IDNP_ACKNACK_MASK) == msg)) {
4348 		PR_PROTO("%s:%d: unwanted acknack received (o_xstate = %s, "
4349 		    "msg = %s/%s - dropping message\n",
4350 		    proc, domid, idnxs_str[(int)o_xstate], mstr, astr);
4351 		return (0);
4352 	}
4353 
4354 	/*
4355 	 * Validate that message received is following
4356 	 * the expected protocol for the current state.
4357 	 */
4358 	if (idn_next_xstate(o_xstate, -1, msg) == IDNXS_NIL) {
4359 		PR_PROTO("%s:%d: WARNING: o_xstate = %s, msg = %s -> NIL "
4360 		    "<<<<<<<<<\n",
4361 		    proc, domid, idnxs_str[(int)o_xstate], mstr);
4362 		if (xfunc)
4363 			(*xfunc)(domid, msgtype);
4364 		return (-1);
4365 	}
4366 
4367 	if (msg || msgarg) {
4368 		/*
4369 		 * Verify that message type is correct for
4370 		 * the given xstate.
4371 		 */
4372 		if (msgtype != xp->xt_msgtype) {
4373 			STRING(xstr);
4374 			STRING(tstr);
4375 
4376 			INUM2STR(xp->xt_msgtype, xstr);
4377 			INUM2STR(msgtype, tstr);
4378 			PR_PROTO("%s:%d: WARNING: msg expected %s(0x%x), "
4379 			    "actual %s(0x%x) [msg=%s(0x%x), "
4380 			    "msgarg=%s(0x%x)]\n",
4381 			    proc, domid, xstr, xp->xt_msgtype,
4382 			    tstr, msgtype, mstr, msg, astr, msgarg);
4383 			if (xfunc)
4384 				(*xfunc)(domid, msgtype);
4385 			return (-1);
4386 		}
4387 	}
4388 
4389 	cfunc = xp->xt_trans[(int)o_xstate].t_check;
4390 
4391 	if (cfunc && ((err = (*cfunc)(domid, mtp, xargs)) < 0)) {
4392 		if (o_xstate != IDNXS_PEND) {
4393 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
4394 		}
4395 		if (xfunc)
4396 			(*xfunc)(domid, msgtype);
4397 		return (-1);
4398 	}
4399 
4400 	n_xstate = idn_next_xstate(o_xstate, err, msg);
4401 
4402 	if (n_xstate == IDNXS_NIL) {
4403 		PR_PROTO("%s:%d: WARNING: n_xstate = %s, msg = %s -> NIL "
4404 		    "<<<<<<<<<\n",
4405 		    proc, domid, idnxs_str[(int)n_xstate], mstr);
4406 		if (xfunc)
4407 			(*xfunc)(domid, msgtype);
4408 		return (-1);
4409 	}
4410 
4411 	if (n_xstate != o_xstate) {
4412 		IDN_XSTATE_TRANSITION(dp, n_xstate);
4413 	}
4414 
4415 	if (err) {
4416 		if ((efunc = xp->xt_trans[(int)o_xstate].t_error) != NULL)
4417 			(*efunc)(domid, mtp, xargs);
4418 	} else if ((afunc = xp->xt_trans[(int)o_xstate].t_action) != NULL) {
4419 		(*afunc)(domid, mtp, xargs);
4420 	}
4421 
4422 	if ((n_xstate == IDNXS_FINAL) && ((ffunc = xp->xt_final) != NULL))
4423 		(*ffunc)(domid);
4424 
4425 	return (0);
4426 }
4427 
4428 /*
4429  * Entered and returns w/DLOCK & SYNC_LOCK held.
4430  */
4431 static int
4432 idn_xstate_transfunc(int domid, void *transarg)
4433 {
4434 	uint_t		msg = (uint_t)(uintptr_t)transarg;
4435 	uint_t		token;
4436 	procname_t	proc = "idn_xstate_transfunc";
4437 
4438 	ASSERT(IDN_SYNC_IS_LOCKED());
4439 
4440 	switch (msg) {
4441 	case IDNP_CON:
4442 		DOMAINSET_ADD(idn.domset.ds_connected, domid);
4443 		break;
4444 
4445 	case IDNP_FIN:
4446 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
4447 		break;
4448 
4449 	default:
4450 		PR_PROTO("%s:%d: ERROR: unknown msg (0x%x) <<<<<<<<\n",
4451 		    proc, domid, msg);
4452 		return (0);
4453 	}
4454 
4455 	token = IDN_RETRY_TOKEN(domid, (msg == IDNP_CON) ?
4456 	    IDNRETRY_CON : IDNRETRY_FIN);
4457 	if (msg == IDNP_CON)
4458 		idn_retry_submit(idn_retry_con, NULL, token,
4459 		    idn_msg_retrytime[(int)IDNRETRY_CON]);
4460 	else
4461 		idn_retry_submit(idn_retry_fin, NULL, token,
4462 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
4463 
4464 	return (1);
4465 }
4466 
4467 /*
4468  * Entered and returns w/DLOCK & SYNC_LOCK held.
4469  */
4470 static void
4471 idn_sync_enter(int domid, idn_synccmd_t cmd, domainset_t xset,
4472     domainset_t rset, int (*transfunc)(), void *transarg)
4473 {
4474 	int		z;
4475 	idn_syncop_t	*sp;
4476 	idn_synczone_t	*zp;
4477 	procname_t	proc = "idn_sync_enter";
4478 
4479 	ASSERT(IDN_SYNC_IS_LOCKED());
4480 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4481 
4482 	z = IDN_SYNC_GETZONE(cmd);
4483 	ASSERT(z >= 0);
4484 	zp = &idn.sync.sz_zone[z];
4485 
4486 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d, xs=0x%x, rx=0x%x, cnt=%d\n",
4487 	    proc, domid, idnsync_str[cmd], cmd, z, xset, rset, zp->sc_cnt);
4488 
4489 	sp = &idn_domain[domid].dsync;
4490 
4491 	sp->s_domid = domid;
4492 	sp->s_cmd = cmd;
4493 	sp->s_msg = 0;
4494 	sp->s_set_exp = xset;
4495 	sp->s_set_rdy = rset;
4496 	sp->s_transfunc = transfunc;
4497 	sp->s_transarg = transarg;
4498 	IDN_SYNC_QUERY_INIT(domid);
4499 
4500 	sp->s_next = zp->sc_op;
4501 	zp->sc_op = sp;
4502 	zp->sc_cnt++;
4503 }
4504 
4505 /*
4506  * Entered and returns w/DLOCK & SYNC_LOCK held.
4507  */
4508 void
4509 idn_sync_exit(int domid, idn_synccmd_t cmd)
4510 {
4511 	int		d, z, zone, tot_queries, tot_domains;
4512 	idn_syncop_t	*sp;
4513 	idn_synczone_t	*zp = NULL;
4514 	procname_t	proc = "idn_sync_exit";
4515 
4516 	ASSERT(IDN_SYNC_IS_LOCKED());
4517 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4518 
4519 	sp = &idn_domain[domid].dsync;
4520 
4521 	z = IDN_SYNC_GETZONE(sp->s_cmd);
4522 
4523 	zone = IDN_SYNC_GETZONE(cmd);
4524 
4525 	PR_SYNC("%s:%d: cmd=%s(%d) (z=%d, zone=%d)\n",
4526 	    proc, domid, idnsync_str[cmd], cmd, z, zone);
4527 
4528 #ifdef DEBUG
4529 	if (z != -1) {
4530 		tot_queries = tot_domains = 0;
4531 
4532 		for (d = 0; d < MAX_DOMAINS; d++) {
4533 			int	qv;
4534 
4535 			if ((qv = sp->s_query[d]) > 0) {
4536 				tot_queries += qv;
4537 				tot_domains++;
4538 				PR_SYNC("%s:%d: query_count = %d\n",
4539 				    proc, domid, qv);
4540 			}
4541 		}
4542 		PR_SYNC("%s:%d: tot_queries = %d, tot_domaines = %d\n",
4543 		    proc, domid, tot_queries, tot_domains);
4544 	}
4545 #endif /* DEBUG */
4546 
4547 	zp = (z != -1) ? &idn.sync.sz_zone[z] : NULL;
4548 
4549 	if (zp) {
4550 		idn_syncop_t	**spp;
4551 
4552 		for (spp = &zp->sc_op; *spp; spp = &((*spp)->s_next)) {
4553 			if (*spp == sp) {
4554 				*spp = sp->s_next;
4555 				sp->s_next = NULL;
4556 				zp->sc_cnt--;
4557 				break;
4558 			}
4559 		}
4560 	}
4561 
4562 	sp->s_cmd = IDNSYNC_NIL;
4563 
4564 	for (z = 0; z < IDN_SYNC_NUMZONE; z++) {
4565 		idn_syncop_t	**spp, **nspp;
4566 
4567 		if ((zone != -1) && (z != zone))
4568 			continue;
4569 
4570 		zp = &idn.sync.sz_zone[z];
4571 
4572 		for (spp = &zp->sc_op; *spp; spp = nspp) {
4573 			sp = *spp;
4574 			nspp = &sp->s_next;
4575 
4576 			if (!DOMAIN_IN_SET(sp->s_set_exp, domid))
4577 				continue;
4578 
4579 			DOMAINSET_DEL(sp->s_set_exp, domid);
4580 			DOMAINSET_DEL(sp->s_set_rdy, domid);
4581 
4582 			if ((sp->s_set_exp == sp->s_set_rdy) &&
4583 			    sp->s_transfunc) {
4584 				int	delok;
4585 
4586 				ASSERT(sp->s_domid != domid);
4587 
4588 				PR_SYNC("%s:%d invoking transfunc "
4589 				    "for domain %d\n",
4590 				    proc, domid, sp->s_domid);
4591 				delok = (*sp->s_transfunc)(sp->s_domid,
4592 				    sp->s_transarg);
4593 				if (delok) {
4594 					*spp = sp->s_next;
4595 					sp->s_next = NULL;
4596 					zp->sc_cnt--;
4597 					nspp = spp;
4598 				}
4599 			}
4600 		}
4601 	}
4602 }
4603 
4604 /*
4605  * Entered and returns w/DLOCK & SYNC_LOCK held.
4606  */
4607 static domainset_t
4608 idn_sync_register(int domid, idn_synccmd_t cmd, domainset_t ready_set,
4609     idn_syncreg_t regtype)
4610 {
4611 	int		z;
4612 	idn_synczone_t	*zp;
4613 	idn_syncop_t	*sp, **spp, **nspp;
4614 	domainset_t	query_set = 0, trans_set;
4615 	procname_t	proc = "idn_sync_register";
4616 
4617 	ASSERT(IDN_SYNC_IS_LOCKED());
4618 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4619 
4620 	if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
4621 		PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
4622 		    proc, domid, cmd);
4623 		return (0);
4624 	}
4625 
4626 	/*
4627 	 * Find out what domains are in transition with respect
4628 	 * to given command.  There will be no need to query
4629 	 * these folks.
4630 	 */
4631 	trans_set = IDN_SYNC_GETTRANS(cmd);
4632 
4633 	zp = &idn.sync.sz_zone[z];
4634 
4635 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d, rset=0x%x, "
4636 	    "regtype=%s(%d), sc_op=%s\n",
4637 	    proc, domid, idnsync_str[cmd], cmd, z, ready_set,
4638 	    idnreg_str[regtype], regtype,
4639 	    zp->sc_op ? idnsync_str[zp->sc_op->s_cmd] : "NULL");
4640 
4641 	for (spp = &zp->sc_op; *spp; spp = nspp) {
4642 		sp = *spp;
4643 		nspp = &sp->s_next;
4644 
4645 		if (regtype == IDNSYNC_REG_NEW) {
4646 			DOMAINSET_ADD(sp->s_set_exp, domid);
4647 			PR_SYNC("%s:%d: adding new to %d (exp=0x%x)\n",
4648 			    proc, domid, sp->s_domid, sp->s_set_exp);
4649 		} else if (regtype == IDNSYNC_REG_QUERY) {
4650 			query_set |= ~sp->s_set_rdy & sp->s_set_exp;
4651 			continue;
4652 		}
4653 
4654 		if (!DOMAIN_IN_SET(sp->s_set_exp, domid))
4655 			continue;
4656 
4657 		if (!DOMAIN_IN_SET(ready_set, sp->s_domid)) {
4658 			/*
4659 			 * Given domid doesn't have a desired
4660 			 * domain in his ready-set.  We'll need
4661 			 * to query him again.
4662 			 */
4663 			DOMAINSET_ADD(query_set, domid);
4664 			continue;
4665 		}
4666 
4667 		/*
4668 		 * If we reach here, then an expected domain
4669 		 * has marked its respective datapath to
4670 		 * sp->s_domid as down (i.e. in his ready_set).
4671 		 */
4672 		DOMAINSET_ADD(sp->s_set_rdy, domid);
4673 
4674 		PR_SYNC("%s:%d: mark READY for domain %d "
4675 		    "(r=0x%x, x=0x%x)\n",
4676 		    proc, domid, sp->s_domid,
4677 		    sp->s_set_rdy, sp->s_set_exp);
4678 
4679 		query_set |= ~sp->s_set_rdy & sp->s_set_exp;
4680 
4681 		if (sp->s_set_exp == sp->s_set_rdy) {
4682 #ifdef DEBUG
4683 			if (sp->s_msg == 0) {
4684 				sp->s_msg = 1;
4685 				PR_SYNC("%s:%d: >>>>>>>>>>> DOMAIN %d "
4686 				    "ALL CHECKED IN (0x%x)\n",
4687 				    proc, domid, sp->s_domid,
4688 				    sp->s_set_exp);
4689 			}
4690 #endif /* DEBUG */
4691 
4692 			if ((sp->s_domid != domid) && sp->s_transfunc) {
4693 				int	delok;
4694 
4695 				PR_SYNC("%s:%d invoking transfunc "
4696 				    "for domain %d\n",
4697 				    proc, domid, sp->s_domid);
4698 				delok = (*sp->s_transfunc)(sp->s_domid,
4699 				    sp->s_transarg);
4700 				if (delok) {
4701 					*spp = sp->s_next;
4702 					sp->s_next = NULL;
4703 					zp->sc_cnt--;
4704 					nspp = spp;
4705 				}
4706 			}
4707 		}
4708 	}
4709 
4710 	PR_SYNC("%s:%d: trans_set = 0x%x, query_set = 0x%x -> 0x%x\n",
4711 	    proc, domid, trans_set, query_set, query_set & ~trans_set);
4712 
4713 	query_set &= ~trans_set;
4714 
4715 	return (query_set);
4716 }
4717 
4718 static void
4719 idn_sync_register_awol(int domid)
4720 {
4721 	int		z;
4722 	idn_synccmd_t	cmd = IDNSYNC_DISCONNECT;
4723 	idn_synczone_t	*zp;
4724 	idn_syncop_t	*sp;
4725 	procname_t	proc = "idn_sync_register_awol";
4726 
4727 	ASSERT(IDN_SYNC_IS_LOCKED());
4728 
4729 	if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
4730 		PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
4731 		    proc, domid, cmd);
4732 		return;
4733 	}
4734 
4735 	zp = &idn.sync.sz_zone[z];
4736 
4737 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d (domain %d = AWOL)\n",
4738 	    proc, domid, idnsync_str[cmd], cmd, z, domid);
4739 
4740 	for (sp = zp->sc_op; sp; sp = sp->s_next) {
4741 		idn_domain_t	*dp;
4742 
4743 		dp = &idn_domain[sp->s_domid];
4744 		if (dp->dfin == IDNFIN_FORCE_HARD) {
4745 			DOMAINSET_ADD(sp->s_set_rdy, domid);
4746 			PR_SYNC("%s:%d: adding new to %d (rdy=0x%x)\n",
4747 			    proc, domid, sp->s_domid, sp->s_set_rdy);
4748 		}
4749 	}
4750 }
4751 
4752 static void
4753 idn_link_established(void *arg)
4754 {
4755 	int	first_link;
4756 	int	domid, masterid;
4757 	uint_t	info = (uint_t)(uintptr_t)arg;
4758 
4759 	first_link = (int)(info & 0xf0);
4760 	domid = (int)(info & 0x0f);
4761 
4762 	IDN_GLOCK_SHARED();
4763 	masterid = IDN_GET_MASTERID();
4764 	if ((masterid == IDN_NIL_DOMID) ||
4765 	    (idn_domain[masterid].dstate != IDNDS_CONNECTED)) {
4766 		/*
4767 		 * No point in doing this unless we're connected
4768 		 * to the master.
4769 		 */
4770 		if ((masterid != IDN_NIL_DOMID) &&
4771 		    (idn.state == IDNGS_ONLINE)) {
4772 			/*
4773 			 * As long as we're still online keep
4774 			 * trying.
4775 			 */
4776 			(void) timeout(idn_link_established, arg, 50);
4777 		}
4778 		IDN_GUNLOCK();
4779 		return;
4780 	}
4781 	IDN_GUNLOCK();
4782 
4783 	if (first_link && IDN_SLAB_PREALLOC)
4784 		idn_prealloc_slab(IDN_SLAB_PREALLOC);
4785 
4786 	/*
4787 	 * No guarantee, but it might save a little
4788 	 * time.
4789 	 */
4790 	if (idn_domain[domid].dstate == IDNDS_CONNECTED) {
4791 		/*
4792 		 * Get the remote domain's dname.
4793 		 */
4794 		idn_send_nodename_req(domid);
4795 	}
4796 
4797 	/*
4798 	 * May have had some streams backed up waiting for
4799 	 * this connection.  Prod them.
4800 	 */
4801 	rw_enter(&idn.struprwlock, RW_READER);
4802 	mutex_enter(&idn.sipwenlock);
4803 	idndl_wenable(NULL);
4804 	mutex_exit(&idn.sipwenlock);
4805 	rw_exit(&idn.struprwlock);
4806 }
4807 
4808 /*
4809  * Send the following chunk of data received from above onto
4810  * the IDN wire.  This is raw data as far as the IDN driver
4811  * is concerned.
4812  * Returns:
4813  *	IDNXMIT_LOOP	- Msg handled in loopback and thus
4814  *			  still active (i.e. don't free).
4815  *	IDNXMIT_OKAY	- Data handled (freemsg).
4816  *	IDNXMIT_DROP	- Packet should be dropped.
4817  *	IDNXMIT_RETRY	- Packet should be requeued and retried.
4818  *	IDNXMIT_REQUEUE	- Packet should be requeued, but not
4819  *			  immediatetly retried.
4820  */
4821 int
4822 idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
4823 {
4824 	int		pktcnt = 0;
4825 	int		msglen;
4826 	int		rv = IDNXMIT_OKAY;
4827 	int		xfersize = 0;
4828 	caddr_t		iobufp, iodatap;
4829 	uchar_t		*data_rptr;
4830 	int		cpuindex;
4831 	int		serrno;
4832 	int		channel;
4833 	int		retry_reclaim;
4834 	idn_chansvr_t	*csp = NULL;
4835 	uint_t		netports = 0;
4836 	struct idnstr	*stp;
4837 	struct idn	*sip;
4838 	idn_domain_t	*dp;
4839 	struct ether_header	*ehp;
4840 	smr_pkthdr_t	*hdrp;
4841 	idn_msgtype_t	mt;
4842 	procname_t	proc = "idn_send_data";
4843 #ifdef DEBUG
4844 	size_t		orig_msglen = msgsize(mp);
4845 #endif /* DEBUG */
4846 
4847 	ASSERT(DB_TYPE(mp) == M_DATA);
4848 
4849 	mt.mt_mtype = IDNP_DATA;
4850 	mt.mt_atype = 0;
4851 	mt.mt_cookie = 0;
4852 
4853 	channel = (int)dst_netaddr.net.chan;
4854 
4855 	msglen = msgdsize(mp);
4856 	PR_DATA("%s:%d: (netaddr 0x%x) msgsize=%ld, msgdsize=%d\n",
4857 	    proc, dst_domid, dst_netaddr.netaddr, msgsize(mp), msglen);
4858 
4859 	ASSERT(wq->q_ptr);
4860 
4861 	stp = (struct idnstr *)wq->q_ptr;
4862 	sip = stp->ss_sip;
4863 	ASSERT(sip);
4864 
4865 	if (msglen < 0) {
4866 		/*
4867 		 * No data to send.  That was easy!
4868 		 */
4869 		PR_DATA("%s:%d: BAD msg length (%d) (netaddr 0x%x)\n",
4870 		    proc, dst_domid, msglen, dst_netaddr.netaddr);
4871 		return (IDNXMIT_DROP);
4872 	}
4873 
4874 	ASSERT(RW_READ_HELD(&stp->ss_rwlock));
4875 
4876 	if (dst_domid == IDN_NIL_DOMID) {
4877 		cmn_err(CE_WARN,
4878 		    "IDN: 213: no destination specified "
4879 		    "(d=%d, c=%d, n=0x%x)",
4880 		    dst_domid, dst_netaddr.net.chan,
4881 		    dst_netaddr.net.netid);
4882 		IDN_KSTAT_INC(sip, si_nolink);
4883 		IDN_KSTAT_INC(sip, si_macxmt_errors);
4884 		rv = IDNXMIT_DROP;
4885 		goto nocando;
4886 	}
4887 
4888 	ehp = (struct ether_header *)mp->b_rptr;
4889 	PR_DATA("%s:%d: destination channel = %d\n", proc, dst_domid, channel);
4890 
4891 #ifdef DEBUG
4892 	{
4893 		uchar_t	echn;
4894 
4895 		echn = (uchar_t)
4896 		    ehp->ether_shost.ether_addr_octet[IDNETHER_CHANNEL];
4897 		ASSERT((uchar_t)channel == echn);
4898 	}
4899 #endif /* DEBUG */
4900 	ASSERT(msglen <= IDN_DATA_SIZE);
4901 
4902 	dp = &idn_domain[dst_domid];
4903 	/*
4904 	 * Get reader lock.  We hold for the duration
4905 	 * of the transfer so that our state doesn't
4906 	 * change during this activity.  Note that since
4907 	 * we grab the reader lock, we can still permit
4908 	 * simultaneous tranfers from different threads
4909 	 * to the same domain.
4910 	 * Before we waste a bunch of time gathering locks, etc.
4911 	 * do a an unprotected check to make sure things are
4912 	 * semi-copesetic.  If these values are in flux,
4913 	 * that's okay.
4914 	 */
4915 	if ((dp->dstate != IDNDS_CONNECTED) || (idn.state != IDNGS_ONLINE)) {
4916 		IDN_KSTAT_INC(sip, si_linkdown);
4917 		if (idn.state != IDNGS_ONLINE) {
4918 			rv = IDNXMIT_REQUEUE;
4919 		} else {
4920 			IDN_KSTAT_INC(sip, si_macxmt_errors);
4921 			rv = IDNXMIT_DROP;
4922 		}
4923 		goto nocando;
4924 	}
4925 
4926 	if (idn.chan_servers[channel].ch_send.c_checkin) {
4927 		/*
4928 		 * Gotta bail, somethin' s'up.
4929 		 */
4930 		rv = IDNXMIT_REQUEUE;
4931 		goto nocando;
4932 	}
4933 
4934 	csp = &idn.chan_servers[channel];
4935 	IDN_CHAN_LOCK_SEND(csp);
4936 
4937 	if (dst_netaddr.net.netid == IDN_BROADCAST_ALLNETID) {
4938 		/*
4939 		 * We're doing a broadcast.  Need to set
4940 		 * up IDN netaddr's one at a time.
4941 		 * We set the ethernet destination to the same
4942 		 * instance as the sending address.  The instance
4943 		 * numbers effectively represent subnets.
4944 		 */
4945 		dst_netaddr.net.netid = dp->dnetid;
4946 
4947 		(void) idndl_domain_etheraddr(dst_domid, channel,
4948 		    &ehp->ether_dhost);
4949 
4950 		if (dst_domid == idn.localid) {
4951 			mblk_t	*nmp;
4952 			/*
4953 			 * If this is a broadcast and going to
4954 			 * the local domain, then we need to make
4955 			 * a private copy of the message since
4956 			 * the current one will be reused when
4957 			 * transmitting to other domains.
4958 			 */
4959 			PR_DATA("%s:%d: dup broadcast msg for local domain\n",
4960 			    proc, dst_domid);
4961 			if ((nmp = copymsg(mp)) == NULL) {
4962 				/*
4963 				 * Couldn't get a duplicate copy.
4964 				 */
4965 				IDN_CHAN_UNLOCK_SEND(csp);
4966 				csp = NULL;
4967 				IDN_KSTAT_INC(sip, si_allocbfail);
4968 				IDN_KSTAT_INC(sip, si_noxmtbuf);
4969 				rv = IDNXMIT_DROP;
4970 				goto nocando;
4971 			}
4972 			mp = nmp;
4973 		}
4974 	}
4975 
4976 	if (dp->dnetid != dst_netaddr.net.netid) {
4977 		PR_DATA("%s:%d: dest netid (0x%x) != expected (0x%x)\n",
4978 		    proc, dst_domid, (uint_t)dst_netaddr.net.netid,
4979 		    (uint_t)dp->dnetid);
4980 		IDN_CHAN_UNLOCK_SEND(csp);
4981 		csp = NULL;
4982 		IDN_KSTAT_INC(sip, si_nolink);
4983 		IDN_KSTAT_INC(sip, si_macxmt_errors);
4984 		rv = IDNXMIT_DROP;
4985 		goto nocando;
4986 	}
4987 
4988 	if (dst_domid == idn.localid) {
4989 		int	lbrv;
4990 		/*
4991 		 * Sending to our local domain! Loopback.
4992 		 * Note that idn_send_data_loop returning 0
4993 		 * does not mean the message can now be freed.
4994 		 * We need to return (-1) so that caller doesn't
4995 		 * try to free mblk.
4996 		 */
4997 		IDN_CHAN_UNLOCK_SEND(csp);
4998 		rw_exit(&stp->ss_rwlock);
4999 		lbrv = idn_send_data_loopback(dst_netaddr, wq, mp);
5000 		rw_enter(&stp->ss_rwlock, RW_READER);
5001 		if (lbrv == 0) {
5002 			return (IDNXMIT_LOOP);
5003 		} else {
5004 			IDN_KSTAT_INC(sip, si_macxmt_errors);
5005 			return (IDNXMIT_DROP);
5006 		}
5007 	}
5008 
5009 	if (dp->dstate != IDNDS_CONNECTED) {
5010 		/*
5011 		 * Can't send data unless a link has already been
5012 		 * established with the target domain.  Normally,
5013 		 * a user cannot set the remote netaddr unless a
5014 		 * link has already been established, however it
5015 		 * is possible the connection may have become
5016 		 * disconnected since that time.
5017 		 */
5018 		IDN_CHAN_UNLOCK_SEND(csp);
5019 		csp = NULL;
5020 		IDN_KSTAT_INC(sip, si_linkdown);
5021 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5022 		rv = IDNXMIT_DROP;
5023 		goto nocando;
5024 	}
5025 
5026 	/*
5027 	 * Need to make sure the channel is active and that the
5028 	 * domain to which we're sending is allowed to receive stuff.
5029 	 */
5030 	if (!IDN_CHANNEL_IS_SEND_ACTIVE(csp)) {
5031 		int	not_active;
5032 		/*
5033 		 * See if we can activate channel.
5034 		 */
5035 		IDN_CHAN_UNLOCK_SEND(csp);
5036 		not_active = idn_activate_channel(CHANSET(channel),
5037 		    IDNCHAN_OPEN);
5038 		if (!not_active) {
5039 			/*
5040 			 * Only grab the lock for a recheck if we were
5041 			 * able to activate the channel.
5042 			 */
5043 			IDN_CHAN_LOCK_SEND(csp);
5044 		}
5045 		/*
5046 		 * Verify channel still active now that we have the lock.
5047 		 */
5048 		if (not_active || !IDN_CHANNEL_IS_SEND_ACTIVE(csp)) {
5049 			if (!not_active) {
5050 				/*
5051 				 * Only need to drop the lock if it was
5052 				 * acquired while we thought we had
5053 				 * activated the channel.
5054 				 */
5055 				IDN_CHAN_UNLOCK_SEND(csp);
5056 			}
5057 			ASSERT(!IDN_CHAN_SEND_IS_LOCKED(csp));
5058 			/*
5059 			 * Damn!   Must have went inactive during the window
5060 			 * before we regrabbed the send lock.  Oh well, can't
5061 			 * spend all day doing this, bail out.  Set csp to
5062 			 * NULL to prevent inprogress update at bottom.
5063 			 */
5064 			csp = NULL;
5065 			/*
5066 			 * Channel is not active, should not be used.
5067 			 */
5068 			PR_DATA("%s:%d: dest channel %d NOT ACTIVE\n",
5069 			    proc, dst_domid, channel);
5070 			IDN_KSTAT_INC(sip, si_linkdown);
5071 			rv = IDNXMIT_REQUEUE;
5072 			goto nocando;
5073 		}
5074 		ASSERT(IDN_CHAN_SEND_IS_LOCKED(csp));
5075 	}
5076 	/*
5077 	 * If we made it here then the channel is active
5078 	 * Make sure the target domain is registered to receive stuff,
5079 	 * i.e. we're still linked.
5080 	 */
5081 	if (!IDN_CHAN_DOMAIN_IS_REGISTERED(csp, dst_domid)) {
5082 		/*
5083 		 * If domain is not even registered with this channel
5084 		 * then we have no business being here.  Doesn't matter
5085 		 * whether it's active or not.
5086 		 */
5087 		PR_DATA("%s:%d: domain not registered with channel %d\n",
5088 		    proc, dst_domid, channel);
5089 		/*
5090 		 * Set csp to NULL to prevent in-progress update below.
5091 		 */
5092 		IDN_CHAN_UNLOCK_SEND(csp);
5093 		csp = NULL;
5094 		IDN_KSTAT_INC(sip, si_linkdown);
5095 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5096 		rv = IDNXMIT_DROP;
5097 		goto nocando;
5098 	}
5099 
5100 	IDN_CHAN_SEND_INPROGRESS(csp);
5101 	IDN_CHAN_UNLOCK_SEND(csp);
5102 
5103 	/*
5104 	 * Find a target cpu to send interrupt to if
5105 	 * it becomes necessary (i.e. remote channel
5106 	 * server is idle).
5107 	 */
5108 	cpuindex = dp->dcpuindex;
5109 
5110 	/*
5111 	 * dcpuindex is atomically incremented, but other than
5112 	 * that is not well protected and that's okay.  The
5113 	 * intention is to simply spread around the interrupts
5114 	 * at the destination domain, however we don't have to
5115 	 * anal about it.  If we hit the same cpu multiple times
5116 	 * in a row that's okay, it will only be for a very short
5117 	 * period anyway before the cpuindex is incremented
5118 	 * to the next cpu.
5119 	 */
5120 	if (cpuindex < NCPU) {
5121 		ATOMIC_INC(dp->dcpuindex);
5122 	}
5123 	if (dp->dcpuindex >= NCPU)
5124 		dp->dcpuindex = 0;
5125 
5126 	IDN_ASSIGN_DCPU(dp, cpuindex);
5127 
5128 #ifdef XXX_DLPI_UNFRIENDLY
5129 	{
5130 		ushort_t	dstport = (ushort_t)dp->dcpu;
5131 
5132 		/*
5133 		 * XXX
5134 		 * This is not DLPI friendly, but we need some way
5135 		 * of distributing our XDC interrupts to the cpus
5136 		 * on the remote domain in a relatively random fashion
5137 		 * while trying to remain constant for an individual
5138 		 * network connection.  Don't want the target network
5139 		 * appl pinging around cpus thrashing the caches.
5140 		 * So, we'll pick target cpus based on the destination
5141 		 * TCP/IP port (socket).  The (simple) alternative to
5142 		 * this is to simply send all messages destined for
5143 		 * particular domain to the same cpu (dcpu), but
5144 		 * will lower our bandwidth and introduce a lot of
5145 		 * contention on that target cpu.
5146 		 */
5147 		if (ehp->ether_type == ETHERTYPE_IP) {
5148 			ipha_t	*ipha;
5149 			uchar_t	*dstporta;
5150 			int	hdr_length;
5151 			mblk_t	*nmp = mp;
5152 			uchar_t	*rptr = mp->b_rptr +
5153 			    sizeof (struct ether_header);
5154 			if (nmp->b_wptr <= rptr) {
5155 				/*
5156 				 * Only the ethernet header was contained
5157 				 * in the first block.  Check for the
5158 				 * next packet.
5159 				 */
5160 				if ((nmp = mp->b_cont) != NULL)
5161 					rptr = nmp->b_rptr;
5162 			}
5163 			/*
5164 			 * If we still haven't found the IP header packet
5165 			 * then don't bother.  Can't search forever.
5166 			 */
5167 			if (nmp &&
5168 			    ((nmp->b_wptr - rptr) >= IP_SIMPLE_HDR_LENGTH)) {
5169 				ipha = (ipha_t *)ALIGN32(rptr);
5170 
5171 				ASSERT(DB_TYPE(mp) == M_DATA);
5172 				hdr_length = IPH_HDR_LENGTH(ipha);
5173 
5174 				switch (ipha->ipha_protocol) {
5175 				case IPPROTO_UDP:
5176 				case IPPROTO_TCP:
5177 					/*
5178 					 * TCP/UDP Protocol Header (1st word)
5179 					 * 0	    15,16	31
5180 					 * -----------------------
5181 					 * | src port | dst port |
5182 					 * -----------------------
5183 					 */
5184 					dstporta = (uchar_t *)ipha + hdr_length;
5185 					netports = *(uint_t *)dstporta;
5186 					dstporta += 2;
5187 					dstport  = *(ushort_t *)dstporta;
5188 					break;
5189 				default:
5190 					break;
5191 				}
5192 			}
5193 
5194 		}
5195 		IDN_ASSIGN_DCPU(dp, dstport);
5196 
5197 		PR_DATA("%s:%d: (dstport %d) assigned %d\n",
5198 		    proc, dst_domid, (int)dstport, dp->dcpu);
5199 	}
5200 #endif /* XXX_DLPI_UNFRIENDLY */
5201 
5202 	data_rptr = mp->b_rptr;
5203 
5204 	ASSERT(dp->dcpu != IDN_NIL_DCPU);
5205 
5206 	ASSERT(idn_domain[dst_domid].dmbox.m_send);
5207 
5208 	retry_reclaim = 1;
5209 retry:
5210 	if ((dp->dio >= IDN_RECLAIM_MIN) || dp->diowanted) {
5211 		int	reclaim_req;
5212 		/*
5213 		 * Reclaim however many outstanding buffers
5214 		 * there are up to IDN_RECLAIM_MAX if it's set.
5215 		 */
5216 		reclaim_req = dp->diowanted ? -1 : IDN_RECLAIM_MAX ?
5217 		    MIN(dp->dio, IDN_RECLAIM_MAX) : dp->dio;
5218 		(void) idn_reclaim_mboxdata(dst_domid, channel,
5219 		    reclaim_req);
5220 	}
5221 
5222 	if (dp->dio >= IDN_WINDOW_EMAX) {
5223 
5224 		if (lock_try(&dp->diocheck)) {
5225 			IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
5226 			    idn_msg_waittime[IDNP_DATA],
5227 			    &mt.mt_cookie);
5228 			/*
5229 			 * We have exceeded the minimum window for
5230 			 * outstanding I/O buffers to this domain.
5231 			 * Need to start the MSG timer to check for
5232 			 * possible response from remote domain.
5233 			 * The remote domain may be hung.  Send a
5234 			 * wakeup!  Specify all channels for given
5235 			 * domain since we don't know precisely which
5236 			 * is backed up (dio is global).
5237 			 */
5238 			IDNXDC(dst_domid, &mt,
5239 			    (uint_t)dst_netaddr.net.chan, 0, 0, 0);
5240 		}
5241 
5242 		/*
5243 		 * Yikes!  We have exceeded the maximum window
5244 		 * which means no more packets going to remote
5245 		 * domain until he frees some up.
5246 		 */
5247 		IDN_KSTAT_INC(sip, si_txmax);
5248 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5249 		rv = IDNXMIT_DROP;
5250 		goto nocando;
5251 	}
5252 
5253 	/*
5254 	 * Allocate a SMR I/O buffer and send it.
5255 	 */
5256 	if (msglen == 0) {
		/*
		 * A zero length message is effectively a signal
		 * to just send an interrupt to the remote domain.
		 */
5261 		IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
5262 		    idn_msg_waittime[IDNP_DATA],
5263 		    &mt.mt_cookie);
5264 		IDNXDC(dst_domid, &mt,
5265 		    (uint_t)dst_netaddr.net.chan, 0, 0, 0);
5266 	}
5267 	for (; (msglen > 0) && mp; msglen -= xfersize) {
5268 		int		xrv;
5269 		smr_offset_t	bufoffset;
5270 #ifdef DEBUG
5271 		int		n_xfersize;
5272 #endif /* DEBUG */
5273 
5274 		ASSERT(msglen <= IDN_DATA_SIZE);
5275 		xfersize = msglen;
5276 
5277 		serrno = smr_buf_alloc(dst_domid, xfersize, &iobufp);
5278 		if (serrno) {
5279 			PR_DATA("%s:%d: failed to alloc SMR I/O buffer "
5280 			    "(serrno = %d)\n",
5281 			    proc, dst_domid, serrno);
5282 			/*
5283 			 * Failure is either due to a timeout waiting
5284 			 * for the master to give us a slab, OR the
5285 			 * local domain exhausted its slab quota!
5286 			 * In either case we'll have to bail from
5287 			 * here and let higher layers decide what
5288 			 * to do.
5289 			 * We also could have had locking problems.
5290 			 * A negative serrno indicates we lost the lock
5291 			 * on dst_domid, so no need in dropping lock.
5292 			 */
5293 
5294 			if (lock_try(&dp->diowanted) && retry_reclaim) {
5295 				/*
5296 				 * We were the first to acquire the
5297 				 * lock indicating that it wasn't
5298 				 * set on entry to idn_send_data.
5299 				 * So, let's go back and see if we
5300 				 * can't reclaim some buffers and
5301 				 * try again.
5302 				 * It's very likely diowanted will be
5303 				 * enough to prevent us from looping
5304 				 * on retrying here, however to protect
5305 				 * against the small window where a
5306 				 * race condition might exist, we use
5307 				 * the retry_reclaim flag so that we
5308 				 * don't retry more than once.
5309 				 */
5310 				retry_reclaim = 0;
5311 				goto retry;
5312 			}
5313 
5314 			rv = (serrno > 0) ? serrno : -serrno;
5315 			IDN_KSTAT_INC(sip, si_notbufs);
5316 			IDN_KSTAT_INC(sip, si_noxmtbuf);	/* MIB II */
5317 			switch (rv) {
5318 			case ENOMEM:
5319 			case EBUSY:
5320 			case ENOLCK:
5321 			case ETIMEDOUT:
5322 			case EDQUOT:
5323 				/*
5324 				 * These are all transient conditions
5325 				 * which should be recoverable over
5326 				 * time.
5327 				 */
5328 				rv = IDNXMIT_REQUEUE;
5329 				break;
5330 
5331 			default:
5332 				rv = IDNXMIT_DROP;
5333 				break;
5334 			}
5335 			goto nocando;
5336 		}
5337 
5338 		lock_clear(&dp->diowanted);
5339 
5340 		hdrp = IDN_BUF2HDR(iobufp);
5341 		bufoffset = (smr_offset_t)IDN_ALIGNPTR(sizeof (smr_pkthdr_t),
5342 		    data_rptr);
		/*
		 * If the alignment of bufoffset took us past the
		 * length of a smr_pkthdr_t then we need to possibly
		 * lower xfersize since it was calculated based on
		 * a perfect alignment.  However, if we're in DLPI
		 * mode then this shouldn't be necessary since the
		 * length of the incoming packet (mblk) should have
		 * already taken into consideration this possible
		 * adjustment.
		 */
5352 #ifdef DEBUG
5353 		if (bufoffset != sizeof (smr_pkthdr_t))
5354 			PR_DATA("%s:%d: offset ALIGNMENT (%lu -> %u) "
5355 			    "(data_rptr = %p)\n",
5356 			    proc, dst_domid, sizeof (smr_pkthdr_t),
5357 			    bufoffset, data_rptr);
5358 
5359 		n_xfersize = MIN(xfersize, (IDN_SMR_BUFSIZE - bufoffset));
5360 		if (xfersize != n_xfersize) {
5361 			PR_DATA("%s:%d: xfersize ADJUST (%d -> %d)\n",
5362 			    proc, dst_domid, xfersize, n_xfersize);
5363 			cmn_err(CE_WARN, "%s: ERROR (xfersize = %d, > "
5364 			    "bufsize(%d)-bufoffset(%d) = %d)",
5365 			    proc, xfersize, IDN_SMR_BUFSIZE,
5366 			    bufoffset,
5367 			    IDN_SMR_BUFSIZE - bufoffset);
5368 		}
5369 #endif /* DEBUG */
5370 		xfersize = MIN(xfersize, (int)(IDN_SMR_BUFSIZE - bufoffset));
5371 
5372 		iodatap = IDN_BUF2DATA(iobufp, bufoffset);
5373 		mp = idn_fill_buffer(iodatap, xfersize, mp, &data_rptr);
5374 
5375 		hdrp->b_netaddr  = dst_netaddr.netaddr;
5376 		hdrp->b_netports = netports;
5377 		hdrp->b_offset   = bufoffset;
5378 		hdrp->b_length   = xfersize;
5379 		hdrp->b_next	 = IDN_NIL_SMROFFSET;
5380 		hdrp->b_rawio	 = 0;
5381 		hdrp->b_cksum    = IDN_CKSUM_PKT(hdrp);
5382 
5383 		xrv = idn_send_mboxdata(dst_domid, sip, channel, iobufp);
5384 		if (xrv) {
5385 			/*
5386 			 * Reclaim packet.
5387 			 * Return error on this packet so it can be retried
5388 			 * (putbq).  Note that it should be safe to assume
5389 			 * that this for-loop is only executed once when in
5390 			 * DLPI mode and so no need to worry about fractured
5391 			 * mblk packet.
5392 			 */
5393 			PR_DATA("%s:%d: DATA XFER to chan %d FAILED "
5394 			    "(ret=%d)\n",
5395 			    proc, dst_domid, channel, xrv);
5396 			smr_buf_free(dst_domid, iobufp, xfersize);
5397 
5398 			PR_DATA("%s:%d: (line %d) dec(dio) -> %d\n",
5399 			    proc, dst_domid, __LINE__, dp->dio);
5400 
5401 			rv = IDNXMIT_DROP;
5402 			IDN_KSTAT_INC(sip, si_macxmt_errors);
5403 			goto nocando;
5404 		} else {
5405 			pktcnt++;
5406 			/*
5407 			 * Packet will get freed on a subsequent send
5408 			 * when we reclaim buffers that the receivers
5409 			 * has finished consuming.
5410 			 */
5411 		}
5412 	}
5413 
5414 #ifdef DEBUG
5415 	if (pktcnt > 1)
5416 		cmn_err(CE_WARN,
5417 		    "%s: ERROR: sent multi-pkts (%d), len = %ld",
5418 		    proc, pktcnt, orig_msglen);
5419 #endif /* DEBUG */
5420 
5421 	PR_DATA("%s:%d: SENT %d packets (%d @ 0x%x)\n",
5422 	    proc, dst_domid, pktcnt, dst_netaddr.net.chan,
5423 	    dst_netaddr.net.netid);
5424 
5425 	IDN_CHAN_LOCK_SEND(csp);
5426 	IDN_CHAN_SEND_DONE(csp);
5427 	IDN_CHAN_UNLOCK_SEND(csp);
5428 
5429 	return (IDNXMIT_OKAY);
5430 
5431 nocando:
5432 
5433 	if (csp) {
5434 		IDN_CHAN_LOCK_SEND(csp);
5435 		IDN_CHAN_SEND_DONE(csp);
5436 		IDN_CHAN_UNLOCK_SEND(csp);
5437 	}
5438 
5439 	if (rv == IDNXMIT_REQUEUE) {
5440 		/*
5441 		 * Better kick off monitor to check when
5442 		 * it's ready to reenable the queues for
5443 		 * this channel.
5444 		 */
5445 		idn_xmit_monitor_kickoff(channel);
5446 	}
5447 
5448 	return (rv);
5449 }
5450 
5451 /*
5452  * Function to support local loopback testing of IDN driver.
5453  * Primarily geared towards measuring stream-head and IDN driver
5454  * overhead with respect to data messages.  Setting idn_strhead_only
5455  * allows routine to focus on stream-head overhead by simply putting
5456  * the message straight to the 'next' queue of the destination
5457  * read-queue.  Current implementation puts the message directly to
5458  * the read-queue thus sending the message right back to the IDN driver
5459  * as though the data came in off the wire.  No need to worry about
5460  * any IDN layers attempting to ack data as that's normally handled
5461  * by idnh_recv_data.
5462  *
5463  * dst_netaddr = destination port-n-addr on local domain.
5464  * wq          = write queue from whence message came.
5465  * mp          = the (data-only) message.
5466  *
5467  * Returns 0		Indicates data handled.
5468  *	   errno	EAGAIN indicates data can be retried.
5469  *			Other errno's indicate failure to handle.
5470  */
5471 static int
5472 idn_send_data_loopback(idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
5473 {
5474 	register struct idnstr	*stp;
5475 	struct idn	*sip;
5476 	int		rv = 0;
5477 	procname_t	proc = "idn_send_data_loopback";
5478 
5479 	if (dst_netaddr.net.netid != idn_domain[idn.localid].dnetid) {
5480 		PR_DATA("%s: dst_netaddr.net.netid 0x%x != local 0x%x\n",
5481 		    proc, dst_netaddr.net.netid,
5482 		    idn_domain[idn.localid].dnetid);
5483 		rv = EADDRNOTAVAIL;
5484 		goto done;
5485 	}
5486 	stp = (struct idnstr *)wq->q_ptr;
5487 	if (!stp || !stp->ss_rq) {
5488 		rv = EDESTADDRREQ;
5489 		goto done;
5490 	}
5491 	sip = stp->ss_sip;
5492 
5493 	idndl_read(sip, mp);
5494 	rv = 0;
5495 
5496 done:
5497 	return (rv);
5498 }
5499 
5500 /*
5501  * Fill bufp with as much data as possible from the message pointed
5502  * to by mp up to size bytes.
5503  * Save our current read pointer in the variable parameter (data_rptrp)
5504  * so we know where to start on the next go around.  Don't want to
5505  * bump the actual b_rptr in the mblk because the mblk may need to
5506  * be reused, e.g. broadcast.
5507  * Return the mblk pointer to the position we had to stop.
5508  */
5509 static mblk_t *
5510 idn_fill_buffer(caddr_t bufp, int size, mblk_t *mp, uchar_t **data_rptrp)
5511 {
5512 	int	copysize;
5513 
5514 	ASSERT(bufp && size);
5515 
5516 	if (mp == NULL)
5517 		return (NULL);
5518 
5519 	while ((size > 0) && mp) {
5520 
5521 		copysize = MIN(mp->b_wptr - (*data_rptrp), size);
5522 
5523 		if (copysize > 0) {
5524 			/*
5525 			 * If there's data to copy, do it.
5526 			 */
5527 			bcopy((*data_rptrp), bufp, copysize);
5528 			(*data_rptrp) += copysize;
5529 			bufp += copysize;
5530 			size -= copysize;
5531 		}
5532 		if (mp->b_wptr <= (*data_rptrp)) {
5533 			/*
5534 			 * If we emptied the mblk, then
5535 			 * move on to the next one.
5536 			 */
5537 			for (mp = mp->b_cont;
5538 			    mp && (mp->b_datap->db_type != M_DATA);
5539 			    mp = mp->b_cont)
5540 				;
5541 			if (mp)
5542 				*data_rptrp = mp->b_rptr;
5543 		}
5544 	}
5545 	return (mp);
5546 }
5547 
5548 /*
5549  * Messages received here do NOT arrive on a stream, but are
5550  * instead handled via the idn_protocol_servers.  This routine
5551  * is effectively the job processor for the protocol servers.
5552  */
static void
idn_recv_proto(idn_protomsg_t *hp)
{
	int		domid, cpuid;
	int		sync_lock = 0;
	idn_domain_t	*dp;
	register uint_t	mtype;
	register uint_t	msgtype, acktype;
	idn_msgtype_t	mt;
	ushort_t	dcookie, tcookie;
	procname_t	proc = "idn_recv_proto";


	if (idn.state == IDNGS_IGNORE) {
		/*
		 * Fault injection to simulate non-responsive domain.
		 */
		return;
	}

	domid   = hp->m_domid;
	cpuid   = hp->m_cpuid;
	msgtype = hp->m_msgtype;
	acktype = hp->m_acktype;
	dcookie = IDN_DCOOKIE(hp->m_cookie);
	tcookie = IDN_TCOOKIE(hp->m_cookie);
	/*
	 * msgtype =	Is the type of message we received,
	 *		e.g. nego, ack, nego+ack, etc.
	 *
	 * acktype =	If we received a pure ack or nack
	 *		then this variable is set to the
	 *		type of message that was ack/nack'd.
	 */
	if ((mtype = msgtype & IDNP_MSGTYPE_MASK) == 0) {
		/*
		 * Received a pure ack/nack.
		 */
		mtype = acktype & IDNP_MSGTYPE_MASK;
	}

	/*
	 * Sanity check the message type and the originating cpu;
	 * silently (modulo debug output) drop anything bogus.
	 */
	if (!VALID_MSGTYPE(mtype)) {
		PR_PROTO("%s:%d: ERROR: invalid message type (0x%x)\n",
		    proc, domid, mtype);
		return;
	}
	if (!VALID_CPUID(cpuid)) {
		PR_PROTO("%s:%d: ERROR: invalid cpuid (%d)\n",
		    proc, domid, cpuid);
		return;
	}

	/*
	 * No pure data packets should reach this level.
	 * Data+ack messages will reach here, but only
	 * for the purpose of stopping the timer which
	 * happens by default when this routine is called.
	 */
	ASSERT(msgtype != IDNP_DATA);

	/*
	 * We should never receive a request from ourself,
	 * except for commands in the case of broadcasts!
	 */
	if ((domid == idn.localid) && (mtype != IDNP_CMD)) {
		char	str[15];

		inum2str(hp->m_msgtype, str);

		cmn_err(CE_WARN,
		    "IDN: 214: received message (%s[0x%x]) from self "
		    "(domid %d)",
		    str, hp->m_msgtype, domid);
		return;
	}

	IDN_SYNC_LOCK();
	/*
	 * Set a flag indicating whether we really need
	 * SYNC-LOCK.  We'll drop it in a little bit if
	 * we really don't need it.
	 */
	switch (mtype) {
	case IDNP_CON:
	case IDNP_FIN:
	case IDNP_NEGO:
		sync_lock = 1;
		break;

	default:
		break;
	}

	/*
	 * Lock ordering: SYNC-LOCK is acquired before the
	 * per-domain lock.
	 */
	dp = &idn_domain[domid];
	IDN_DLOCK_EXCL(domid);

	/*
	 * The only messages we do _not_ check the cookie are:
	 *	nego
	 *	nego+ack
	 *	fin	 - if received cookie is 0.
	 *	fin+ack	 - if received cookie is 0.
	 *	ack/fin	 - if received cookie is 0.
	 *	nack/fin - if received cookie is 0.
	 */
	if (((msgtype & IDNP_MSGTYPE_MASK) != IDNP_NEGO) &&
	    ((mtype != IDNP_FIN) || (dcookie && dp->dcookie_recv))) {
		if (dp->dcookie_recv != dcookie) {
			dp->dcookie_errcnt++;
			if (dp->dcookie_err == 0) {
				/*
				 * Set cookie error to prevent a
				 * possible flood of bogus cookies
				 * and thus error messages.
				 */
				dp->dcookie_err = 1;
				cmn_err(CE_WARN,
				    "IDN: 215: invalid cookie (0x%x) "
				    "for message (0x%x) from domain %d",
				    dcookie, hp->m_msgtype, domid);

				PR_PROTO("%s:%d: received cookie (0x%x), "
				    "expected (0x%x) [errcnt = %d]\n",
				    proc, domid, dcookie,
				    dp->dcookie_recv, dp->dcookie_errcnt);
			}
			IDN_DUNLOCK(domid);
			IDN_SYNC_UNLOCK();
			return;
		}
	}
	/*
	 * Cookie checked out (or wasn't required); clear any
	 * previously latched cookie-error state.
	 */
	dp->dcookie_err = 0;
	IDN_GLOCK_EXCL();

	/* Heard from this domain, so clear its AWOL state (needs GLOCK). */
	idn_clear_awol(domid);

	IDN_GUNLOCK();
	if (!sync_lock)		/* really don't need SYNC-LOCK past here */
		IDN_SYNC_UNLOCK();

	/*
	 * Stop any timers that may have been outstanding for
	 * this domain, for this particular message type.
	 * Note that CFG timers are directly managed by
	 * config recv/send code.
	 */
	if ((mtype != IDNP_CFG) && (msgtype & IDNP_ACKNACK_MASK) && tcookie) {
		IDN_MSGTIMER_STOP(domid, mtype, tcookie);
	}

	/*
	 * Keep track of the last cpu to send us a message.
	 * If the domain has not yet been assigned, we'll need
	 * this cpuid in order to send back a response.
	 */
	dp->dcpu_last = cpuid;

	mt.mt_mtype = (ushort_t)msgtype;
	mt.mt_atype = (ushort_t)acktype;
	mt.mt_cookie = tcookie;

	/* Dispatch to the handler for this message type. */
	switch (mtype) {
	case IDNP_NEGO:
		idn_recv_nego(domid, &mt, hp->m_xargs, dcookie);
		break;

	case IDNP_CFG:
		idn_recv_config(domid, &mt, hp->m_xargs);
		break;

	case IDNP_CON:
		idn_recv_con(domid, &mt, hp->m_xargs);
		break;

	case IDNP_FIN:
		idn_recv_fin(domid, &mt, hp->m_xargs);
		break;

	case IDNP_CMD:
		idn_recv_cmd(domid, &mt, hp->m_xargs);
		break;

	case IDNP_DATA:
		ASSERT(msgtype & IDNP_ACKNACK_MASK);
		/*
		 * When doing the fast track we simply process
		 * possible nack error conditions.  The actual
		 * processing of the SMR data buffer is taken
		 * care of in idnh_recv_dataack.  When NOT doing
		 * the fast track, we do all the processing here
		 * in the protocol server.
		 */
		idn_recv_data(domid, &mt, hp->m_xargs);
		break;

	default:
		/*
		 * Should be receiving 0 inum and 0 acknack.
		 */
		/* Panics DEBUG kernels; production kernels just warn. */
#ifdef DEBUG
		cmn_err(CE_PANIC,
#else /* DEBUG */
		    cmn_err(CE_WARN,
#endif /* DEBUG */
			/* CSTYLED */
			"IDN: 216: (0x%x)msgtype/(0x%x)acktype rcvd from "
			/* CSTYLED */
			"domain %d", msgtype, acktype, domid);
		break;
	}

	IDN_DUNLOCK(domid);
	/*
	 * All receiving routines are responsible for dropping drwlock.
	 */

	if (sync_lock)
		IDN_SYNC_UNLOCK();
}
5772 
5773 /*
5774  * Once the CONFIG state is hit we immediately blast out all
5775  * of our config info.  This guarantees that the CONFIG state
5776  * effectively signifies that the sender has sent _all_ of
5777  * their config info.
5778  */
5779 static void
5780 idn_send_config(int domid, int phase)
5781 {
5782 	idn_domain_t	*dp;
5783 	int		rv;
5784 	clock_t		cfg_waittime = idn_msg_waittime[IDNP_CFG];
5785 	procname_t	proc = "idn_send_config";
5786 
5787 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5788 
5789 	dp = &idn_domain[domid];
5790 
5791 	ASSERT(dp->dstate == IDNDS_CONFIG);
5792 
5793 	if (phase == 1) {
5794 		/*
5795 		 * Reset stuff in dtmp to 0:
5796 		 *	dcfgphase
5797 		 *	dcksum
5798 		 *	dncfgitems
5799 		 *	dmaxnets
5800 		 *	dmboxpernet
5801 		 */
5802 		dp->dtmp = 0;
5803 	}
5804 
5805 	if (dp->dcfgsnddone) {
5806 		if (!dp->dcfgrcvdone) {
5807 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5808 			    cfg_waittime, NULL);
5809 		}
5810 		return;
5811 	}
5812 
5813 	IDN_DLOCK_SHARED(idn.localid);
5814 
5815 	PR_PROTO("%s:%d: sending %s config (phase %d)\n",
5816 	    proc, domid,
5817 	    idn_domain[idn.localid].dvote.v.master ? "MASTER" : "SLAVE",
5818 	    phase);
5819 
5820 	if (idn_domain[idn.localid].dvote.v.master)
5821 		rv = idn_send_master_config(domid, phase);
5822 	else
5823 		rv = idn_send_slave_config(domid, phase);
5824 
5825 	IDN_DUNLOCK(idn.localid);
5826 
5827 	if (rv >= 0) {
5828 
5829 		if (rv == 1) {
5830 			dp->dcfgsnddone = 1;
5831 			PR_PROTO("%s:%d: SEND config DONE\n", proc, domid);
5832 			if (!dp->dcfgrcvdone) {
5833 				IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5834 				    cfg_waittime, NULL);
5835 			}
5836 		} else {
5837 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5838 			    cfg_waittime, NULL);
5839 		}
5840 	}
5841 }
5842 
5843 /*
5844  * Clear out the mailbox table.
5845  * NOTE: This routine touches the SMR.
5846  */
5847 static void
5848 idn_reset_mboxtbl(idn_mboxtbl_t *mtp)
5849 {
5850 	int		qi;
5851 	idn_mboxmsg_t	*mp = &mtp->mt_queue[0];
5852 
5853 	qi = 0;
5854 	do {
5855 		mp[qi].ms_bframe = 0;
5856 		mp[qi].ms_owner = 0;
5857 		mp[qi].ms_flag = 0;
5858 		IDN_MMBOXINDEX_INC(qi);
5859 	} while (qi);
5860 }
5861 
5862 static int
5863 idn_get_mbox_config(int domid, int *mindex, smr_offset_t *mtable,
5864     smr_offset_t *mdomain)
5865 {
5866 	idn_domain_t	*dp, *ldp;
5867 
5868 	dp = &idn_domain[domid];
5869 	ldp = &idn_domain[idn.localid];
5870 
5871 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5872 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
5873 	ASSERT(IDN_GET_MASTERID() != IDN_NIL_DOMID);
5874 
5875 	/*
5876 	 * Get SMR offset of receive mailbox assigned
5877 	 * to respective domain.  If I'm a slave then
5878 	 * my dmbox.m_tbl will not have been assigned yet.
5879 	 * Instead of sending the actual offset I send
5880 	 * the master his assigned index.  Since the
5881 	 * master knows what offset it will assign to
5882 	 * me he can determine his assigned (recv) mailbox
5883 	 * based on the offset and given index.  The local
5884 	 * domain can also use this information once the
5885 	 * dmbox.m_tbl is received to properly assign the
5886 	 * correct mbox offset to the master.
5887 	 */
5888 	if (ldp->dmbox.m_tbl == NULL) {
5889 		/*
5890 		 * Local domain has not yet been assigned a
5891 		 * (recv) mailbox table.  This must be the
5892 		 * initial connection of this domain.
5893 		 */
5894 		ASSERT(dp->dvote.v.master && !ldp->dvote.v.master);
5895 		ASSERT(mindex);
5896 		*mindex = domid;
5897 	} else {
5898 		idn_mboxtbl_t	*mtp;
5899 
5900 		mtp = IDN_MBOXTBL_PTR(ldp->dmbox.m_tbl, domid);
5901 
5902 		ASSERT(mdomain);
5903 		*mdomain = IDN_ADDR2OFFSET(mtp);
5904 
5905 		if (ldp->dvote.v.master) {
5906 			/*
5907 			 * Need to calculate mailbox table to
5908 			 * assign to the given domain.  Since
5909 			 * I'm the master his mailbox is in
5910 			 * the (all-domains) mailbox table.
5911 			 */
5912 			mtp = IDN_MBOXAREA_BASE(idn.mboxarea, domid);
5913 			ASSERT(mtable);
5914 			*mtable = IDN_ADDR2OFFSET(mtp);
5915 
5916 			dp->dmbox.m_tbl = mtp;
5917 		}
5918 	}
5919 
5920 	return (0);
5921 }
5922 
5923 /*
5924  * RETURNS:
5925  *	1	Unexpected/unnecessary phase.
5926  *	0	Successfully handled, timer needed.
5927  */
static int
idn_send_master_config(int domid, int phase)
{
	idn_cfgsubtype_t	cfg_subtype;
	int		rv = 0;
	idn_domain_t	*dp, *ldp;
	idn_msgtype_t	mt;
	int		nmcadr;
	uint_t		barpfn, larpfn;
	uint_t		cpus_u32, cpus_l32;
	uint_t		mcadr[3];
	smr_offset_t	mbox_table, mbox_domain;
	register int	b, p, m;
	procname_t	proc = "idn_send_master_config";

	ASSERT(IDN_DLOCK_IS_EXCL(domid));
	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));

	dp = &idn_domain[domid];
	ldp = &idn_domain[idn.localid];

	ASSERT(dp->dstate == IDNDS_CONFIG);
	ASSERT(dp->dvote.v.master == 0);
	ASSERT(ldp->dvote.v.master == 1);

	mt.mt_mtype = IDNP_CFG;
	mt.mt_atype = 0;
	mt.mt_cookie = 0;
	m = 0;
	mcadr[0] = mcadr[1] = mcadr[2] = 0;
	cfg_subtype.val = 0;

	/*
	 * Each phase packs up to three config items (param.p[0..2])
	 * into a single cross-domain transfer.
	 */
	switch (phase) {

	case 1:
		mbox_table = mbox_domain = IDN_NIL_SMROFFSET;
		idn_get_mbox_config(domid, NULL, &mbox_table, &mbox_domain);
		/*
		 * ----------------------------------------------------
		 * Send: SLABSIZE, DATAMBOX.DOMAIN, DATAMBOX.TABLE
		 * ----------------------------------------------------
		 */
		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
		    IDNCFGARG_SIZE_SLAB);
		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
		    IDNCFGARG_DATAMBOX_DOMAIN);
		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
		    IDNCFGARG_DATAMBOX_TABLE);
		cfg_subtype.info.num = 3;
		cfg_subtype.info.phase = phase;
		dp->dcfgphase = phase;

		ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
		ASSERT(mbox_table != IDN_NIL_SMROFFSET);

		PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), "
		    "DATAMBOX.DOMAIN (0x%x), DATAMBOX.TABLE (0x%x)\n",
		    proc, domid, phase, IDN_SLAB_BUFCOUNT, mbox_domain,
		    mbox_table);

		IDNXDC(domid, &mt, cfg_subtype.val, IDN_SLAB_BUFCOUNT,
		    mbox_domain, mbox_table);
		break;

	case 2:
		barpfn = idn.smr.locpfn;
		larpfn = barpfn + (uint_t)btop(MB2B(IDN_SMR_SIZE));
		/*
		 * ----------------------------------------------------
		 * Send: NETID, BARLAR
		 * ----------------------------------------------------
		 */
		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_BARLAR,
		    IDNCFGARG_BARLAR_BAR);
		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_BARLAR,
		    IDNCFGARG_BARLAR_LAR);
		cfg_subtype.info.num = 3;
		cfg_subtype.info.phase = phase;
		dp->dcfgphase = phase;

		PR_PROTO("%s:%d:%d: sending NETID (%d), "
		    "BARPFN/LARPFN (0x%x/0x%x)\n",
		    proc, domid, phase, ldp->dnetid, barpfn, larpfn);

		IDNXDC(domid, &mt, cfg_subtype.val,
		    (uint_t)ldp->dnetid, barpfn, larpfn);
		break;

	case 3:
		nmcadr = ldp->dhw.dh_nmcadr;
		cpus_u32 = UPPER32_CPUMASK(ldp->dcpuset);
		cpus_l32 = LOWER32_CPUMASK(ldp->dcpuset);
		/*
		 * ----------------------------------------------------
		 * Send: CPUSET, NMCADR
		 * ----------------------------------------------------
		 */
		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_CPUSET,
		    IDNCFGARG_CPUSET_UPPER);
		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
		    IDNCFGARG_CPUSET_LOWER);
		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_NMCADR, 0);
		cfg_subtype.info.num = 3;
		cfg_subtype.info.phase = phase;
		dp->dcfgphase = phase;

		PR_PROTO("%s:%d:%d: sending CPUSET (0x%x.%x), NMCADR (%d)\n",
		    proc, domid, phase, cpus_u32, cpus_l32, nmcadr);

		IDNXDC(domid, &mt, cfg_subtype.val,
		    cpus_u32, cpus_l32, nmcadr);
		break;

	case 4:
		/*
		 * ----------------------------------------------------
		 * Send: BOARDSET, MTU, BUFSIZE
		 * ----------------------------------------------------
		 */
		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
		    IDNCFGARG_SIZE_MTU);
		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
		    IDNCFGARG_SIZE_BUF);
		cfg_subtype.info.num = 3;
		cfg_subtype.info.phase = phase;
		dp->dcfgphase = phase;

		PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
		    "BUFSIZE (0x%x)\n", proc, domid, phase,
		    ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);

		IDNXDC(domid, &mt, cfg_subtype.val,
		    ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
		break;

	case 5:
		/*
		 * ----------------------------------------------------
		 * Send: MAXNETS, MBOXPERNET, CKSUM
		 * ----------------------------------------------------
		 */
		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATASVR,
		    IDNCFGARG_DATASVR_MAXNETS);
		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
		    IDNCFGARG_DATASVR_MBXPERNET);
		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_OPTIONS,
		    IDNCFGARG_CHECKSUM);
		cfg_subtype.info.num = 3;
		cfg_subtype.info.phase = phase;
		dp->dcfgphase = phase;

		PR_PROTO("%s:%d:%d: sending MAXNETS (%d), "
		    "MBOXPERNET (%d), CKSUM (%d)\n",
		    proc, domid, phase,
		    IDN_MAX_NETS, IDN_MBOX_PER_NET,
		    IDN_CHECKSUM);

		IDNXDC(domid, &mt, cfg_subtype.val,
		    IDN_MAX_NETS, IDN_MBOX_PER_NET, IDN_CHECKSUM);
		break;

	case 6:
		/*
		 * ----------------------------------------------------
		 * Send: NWRSIZE (piggyback on MCADRs)
		 * ----------------------------------------------------
		 */
		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
		    IDNCFGARG_SIZE_NWR);
		mcadr[0] = IDN_NWR_SIZE;
		m = 1;

		/*FALLTHROUGH*/

	default:	/* case 7 and above */
		/*
		 * ----------------------------------------------------
		 * Send: MCADR's
		 * ----------------------------------------------------
		 * First need to figure how many we've already sent
		 * based on what phase of CONFIG we're in.
		 * ----------------------------------------------------
		 */
		if (phase > 6) {
			/*
			 * Phase 6 carried only 2 MCADRs (slot 0 held
			 * NWRSIZE); each later phase carries 3.  So
			 * skip past the 2 + 3*(phase-7) non-zero
			 * MCADRs already sent.
			 */
			p = ((phase - 7) * 3) + 2;
			for (b = 0; (b < MAX_BOARDS) && (p > 0); b++)
				if (ldp->dhw.dh_mcadr[b])
					p--;
		} else {
			b = 0;
		}

		/* Fill the remaining param slots with non-zero MCADRs. */
		for (; (b < MAX_BOARDS) && (m < 3); b++) {
			if (ldp->dhw.dh_mcadr[b] == 0)
				continue;
			mcadr[m] = ldp->dhw.dh_mcadr[b];
			cfg_subtype.param.p[m] = IDN_CFGPARAM(IDNCFG_MCADR, b);
			m++;
		}
		if (m > 0) {
			if (phase == 6) {
				PR_PROTO("%s:%d:%d: sending NWRSIZE (%d), "
				    "MCADRs (0x%x, 0x%x)\n",
				    proc, domid, phase,
				    mcadr[0], mcadr[1], mcadr[2]);
			} else {
				PR_PROTO("%s:%d:%d: sending MCADRs "
				    "(0x%x, 0x%x, 0x%x)\n",
				    proc, domid, phase,
				    mcadr[0], mcadr[1], mcadr[2]);
			}
			cfg_subtype.info.num = m;
			cfg_subtype.info.phase = phase;
			dp->dcfgphase = phase;

			IDNXDC(domid, &mt, cfg_subtype.val,
			    mcadr[0], mcadr[1], mcadr[2]);
		} else {
			/* Nothing left to send; tell caller we're done. */
			rv = 1;
		}
		break;
	}

	return (rv);
}
6155 
6156 /*
6157  * RETURNS:
6158  *	1	Unexpected/unnecessary phase.
6159  *	0	Successfully handled.
6160  */
6161 static int
6162 idn_send_slave_config(int domid, int phase)
6163 {
6164 	idn_cfgsubtype_t	cfg_subtype;
6165 	int		rv = 0;
6166 	idn_domain_t	*dp, *ldp;
6167 	smr_offset_t	mbox_domain;
6168 	idn_msgtype_t	mt;
6169 	int		mbox_index;
6170 	uint_t		cpus_u32, cpus_l32;
6171 	procname_t	proc = "idn_send_slave_config";
6172 
6173 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6174 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
6175 
6176 	mt.mt_mtype = IDNP_CFG;
6177 	mt.mt_atype = 0;
6178 	dp = &idn_domain[domid];
6179 	ldp = &idn_domain[idn.localid];
6180 
6181 	ASSERT(dp->dstate == IDNDS_CONFIG);
6182 	ASSERT(ldp->dvote.v.master == 0);
6183 
6184 	switch (phase) {
6185 
6186 	case 1:
6187 		mbox_index = IDN_NIL_DOMID;
6188 		mbox_domain = IDN_NIL_SMROFFSET;
6189 		idn_get_mbox_config(domid, &mbox_index, NULL, &mbox_domain);
6190 		/*
6191 		 * ----------------------------------------------------
6192 		 * Send: DATAMBOX.DOMAIN or DATAMBOX.INDEX,
6193 		 *	 DATASVR.MAXNETS, DATASVR.MBXPERNET
6194 		 * ----------------------------------------------------
6195 		 */
6196 		cfg_subtype.val = 0;
6197 		if (mbox_index == IDN_NIL_DOMID) {
6198 			ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
6199 			cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6200 			    IDNCFGARG_DATAMBOX_DOMAIN);
6201 		} else {
6202 			/*
6203 			 * Should only be sending Index to
6204 			 * the master and not another slave.
6205 			 */
6206 			ASSERT(dp->dvote.v.master);
6207 			ASSERT(mbox_domain == IDN_NIL_SMROFFSET);
6208 			cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6209 			    IDNCFGARG_DATAMBOX_INDEX);
6210 		}
6211 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
6212 		    IDNCFGARG_DATASVR_MAXNETS);
6213 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATASVR,
6214 		    IDNCFGARG_DATASVR_MBXPERNET);
6215 		cfg_subtype.info.num = 3;
6216 		cfg_subtype.info.phase = phase;
6217 		dp->dcfgphase = phase;
6218 
6219 		PR_PROTO("%s:%d:%d: sending DATAMBOX.%s (0x%x), "
6220 		    "MAXNETS (%d), MBXPERNET (%d)\n",
6221 		    proc, domid, phase,
6222 		    (IDN_CFGPARAM_ARG(cfg_subtype.param.p[0])
6223 		    == IDNCFGARG_DATAMBOX_INDEX) ? "INDEX" : "DOMAIN",
6224 		    (mbox_index == IDN_NIL_DOMID) ? mbox_domain : mbox_index,
6225 		    IDN_MAX_NETS, IDN_MBOX_PER_NET);
6226 
6227 		IDNXDC(domid, &mt, cfg_subtype.val,
6228 		    ((mbox_index == IDN_NIL_DOMID) ? mbox_domain : mbox_index),
6229 		    IDN_MAX_NETS, IDN_MBOX_PER_NET);
6230 		break;
6231 
6232 	case 2:
6233 		cpus_u32 = UPPER32_CPUMASK(ldp->dcpuset);
6234 		cpus_l32 = LOWER32_CPUMASK(ldp->dcpuset);
6235 		/*
6236 		 * ----------------------------------------------------
6237 		 * Send: NETID, CPUSET
6238 		 * ----------------------------------------------------
6239 		 */
6240 		cfg_subtype.val = 0;
6241 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
6242 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
6243 		    IDNCFGARG_CPUSET_UPPER);
6244 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_CPUSET,
6245 		    IDNCFGARG_CPUSET_LOWER);
6246 		cfg_subtype.info.num = 3;
6247 		cfg_subtype.info.phase = phase;
6248 		dp->dcfgphase = phase;
6249 
6250 		PR_PROTO("%s:%d:%d: sending NETID (%d), "
6251 		    "CPUSET (0x%x.%x)\n", proc, domid, phase,
6252 		    ldp->dnetid, cpus_u32, cpus_l32);
6253 
6254 		IDNXDC(domid, &mt, cfg_subtype.val,
6255 		    (uint_t)ldp->dnetid, cpus_u32, cpus_l32);
6256 		break;
6257 
6258 	case 3:
6259 		/*
6260 		 * ----------------------------------------------------
6261 		 * Send: BOARDSET, MTU, BUFSIZE
6262 		 * ----------------------------------------------------
6263 		 */
6264 		cfg_subtype.val = 0;
6265 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
6266 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
6267 		    IDNCFGARG_SIZE_MTU);
6268 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6269 		    IDNCFGARG_SIZE_BUF);
6270 		cfg_subtype.info.num = 3;
6271 		cfg_subtype.info.phase = phase;
6272 		dp->dcfgphase = phase;
6273 
6274 		PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
6275 		    "BUFSIZE (0x%x)\n",
6276 		    proc, domid, phase, ldp->dhw.dh_boardset, IDN_MTU,
6277 		    IDN_SMR_BUFSIZE);
6278 
6279 		IDNXDC(domid, &mt, cfg_subtype.val,
6280 		    ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6281 		break;
6282 
6283 	case 4:
6284 		/*
6285 		 * ----------------------------------------------------
6286 		 * Send: SLABSIZE, OPTIONS.CHECKSUM, NWR_SIZE
6287 		 * ----------------------------------------------------
6288 		 */
6289 		cfg_subtype.val = 0;
6290 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
6291 		    IDNCFGARG_SIZE_SLAB);
6292 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_OPTIONS,
6293 		    IDNCFGARG_CHECKSUM);
6294 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6295 		    IDNCFGARG_SIZE_NWR);
6296 		cfg_subtype.info.num = 3;
6297 		cfg_subtype.info.phase = phase;
6298 		dp->dcfgphase = phase;
6299 
6300 		PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), CKSUM (%d), "
6301 		    "NWRSIZE (%d)\n",
6302 		    proc, domid, phase, IDN_SLAB_BUFCOUNT,
6303 		    IDN_CHECKSUM, IDN_NWR_SIZE);
6304 
6305 		IDNXDC(domid, &mt, cfg_subtype.val,
6306 		    IDN_SLAB_BUFCOUNT, IDN_CHECKSUM, IDN_NWR_SIZE);
6307 		break;
6308 
6309 	default:
6310 		rv = 1;
6311 		break;
6312 	}
6313 
6314 	return (rv);
6315 }
6316 
/*
 * Return/status codes for idn_check_slave_config()/idn_check_master_config().
 * CFG_FATAL and CFG_CONTINUE/CFG_DONE are exclusive results; the CFG_ERR_*
 * values are bit flags that may be OR'd together to report multiple
 * parameter conflicts in a single return value.
 */
#define	CFG_FATAL	((uint_t)-1)	/* reset link */
#define	CFG_CONTINUE	0x0000		/* looking for more */
#define	CFG_DONE	0x0001		/* got everything expected */
/*
 * Parameter-conflict bits: remote domain's value disagrees with ours.
 */
#define	CFG_ERR_MTU	0x0002
#define	CFG_ERR_BUF	0x0004
#define	CFG_ERR_SLAB	0x0008
#define	CFG_ERR_NWR	0x0010
#define	CFG_ERR_NETS	0x0020
#define	CFG_ERR_MBOX	0x0040
#define	CFG_ERR_NMCADR	0x0080
#define	CFG_ERR_MCADR	0x0100
#define	CFG_ERR_CKSUM	0x0200
#define	CFG_ERR_SMR	0x0400
/*
 * Number of bit positions scanned when counting how many CFG_ERR_*
 * bits are set (see the error-counting loop in idn_recv_config).
 */
#define	CFG_MAX_ERRORS	16

/*
 * Map a CFG error bitmask to a single IDNKERR_CONFIG_* code for the
 * idnsb_error_t reported to the SSP.  Only the highest-priority set
 * bit (in the order listed) is translated; 0 if no bit is set.
 */
#define	CFGERR2IDNKERR(ce) \
	(((ce) & CFG_ERR_MTU)	? IDNKERR_CONFIG_MTU 	: \
	((ce) & CFG_ERR_BUF)	? IDNKERR_CONFIG_BUF 	: \
	((ce) & CFG_ERR_SLAB)	? IDNKERR_CONFIG_SLAB 	: \
	((ce) & CFG_ERR_NWR)	? IDNKERR_CONFIG_NWR 	: \
	((ce) & CFG_ERR_NETS)	? IDNKERR_CONFIG_NETS 	: \
	((ce) & CFG_ERR_MBOX)	? IDNKERR_CONFIG_MBOX 	: \
	((ce) & CFG_ERR_NMCADR)	? IDNKERR_CONFIG_NMCADR	: \
	((ce) & CFG_ERR_MCADR)	? IDNKERR_CONFIG_MCADR	: \
	((ce) & CFG_ERR_CKSUM)	? IDNKERR_CONFIG_CKSUM	: \
	((ce) & CFG_ERR_SMR)	? IDNKERR_CONFIG_SMR	: 0)

/*
 * Same mapping, but to the IDNFIN_ARG_CFGERR_* argument passed with a
 * FIN (disconnect) so the remote side learns why we're tearing down.
 */
#define	CFGERR2FINARG(ce) \
	(((ce) & CFG_ERR_MTU)	? IDNFIN_ARG_CFGERR_MTU    : \
	((ce) & CFG_ERR_BUF)	? IDNFIN_ARG_CFGERR_BUF    : \
	((ce) & CFG_ERR_SLAB)	? IDNFIN_ARG_CFGERR_SLAB   : \
	((ce) & CFG_ERR_NWR)	? IDNFIN_ARG_CFGERR_NWR    : \
	((ce) & CFG_ERR_NETS)	? IDNFIN_ARG_CFGERR_NETS   : \
	((ce) & CFG_ERR_MBOX)	? IDNFIN_ARG_CFGERR_MBOX   : \
	((ce) & CFG_ERR_NMCADR)	? IDNFIN_ARG_CFGERR_NMCADR : \
	((ce) & CFG_ERR_MCADR)	? IDNFIN_ARG_CFGERR_MCADR  : \
	((ce) & CFG_ERR_CKSUM)	? IDNFIN_ARG_CFGERR_CKSUM  : \
	((ce) & CFG_ERR_SMR)	? IDNFIN_ARG_CFGERR_SMR	   : IDNFIN_ARG_NONE)
6355 
6356 /*
6357  * Called when some CFG messages arrive.  We use dncfgitems to count the
6358  * total number of items received so far since we'll receive multiple CFG
6359  * messages during the CONFIG phase.  Note that dncfgitems is initialized
6360  * in idn_send_config.
6361  */
static void
idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	uint_t		msg = mtp->mt_mtype;
	uint_t		rv, rv_expected, rv_actual;
	int		pnum;		/* number of params in this message */
	int		phase;		/* CFG exchange phase */
	register int	p;
	register int	c;
	idn_mainmbox_t	*mmp;
	register uint_t	subtype, subtype_arg;
	idn_domain_t	*dp;
	int		index;
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	idn_mboxtbl_t	*mbtp;
	idn_cfgsubtype_t	cfg_subtype;
	idn_xdcargs_t	cfg_arg;
	idn_msgtype_t	mt;
	idnsb_error_t	idnerr;
	procname_t	proc = "idn_recv_config";

	ASSERT(domid != idn.localid);

	/*
	 * Unpack the subtype descriptor and the (up to) three config
	 * values carried in the cross-domain-call arguments.
	 */
	GET_XARGS(xargs, &cfg_subtype.val, &cfg_arg[0], &cfg_arg[1],
	    &cfg_arg[2]);
	cfg_arg[3] = 0;

	dp = &idn_domain[domid];

	/* Caller must hold the per-domain lock exclusively. */
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	if (dp->dstate != IDNDS_CONFIG) {
		/*
		 * Not ready to receive config info.
		 * Drop whatever he sent us.  Let the
		 * timer continue and timeout if needed.
		 */
		PR_PROTO("%s:%d: WARNING state(%s) != CONFIG\n",
		    proc, domid, idnds_str[dp->dstate]);
		return;
	}

	/*
	 * Any ACK/NACK, or having finished sending our own config,
	 * means the outstanding CFG timer can be stopped.
	 */
	if ((msg & IDNP_ACKNACK_MASK) || dp->dcfgsnddone) {
		IDN_MSGTIMER_STOP(domid, IDNP_CFG, 0);
	}

	if (msg & IDNP_ACKNACK_MASK) {
		/*
		 * ack/cfg - remote domain acknowledged one of our
		 * CFG phases; either advance to the next phase or,
		 * on a phase mismatch, rearm the timer and wait.
		 */
		phase = GET_XARGS_CFG_PHASE(xargs);

		PR_PROTO("%s:%d: received ACK for CFG phase %d\n",
		    proc, domid, phase);
		if (phase != (int)dp->dcfgphase) {
			/*
			 * Phase is not what we were
			 * expecting.  Something got lost
			 * in the shuffle.  Restart the
			 * timer and let it timeout if necessary
			 * and reestablish the connection.
			 */
			IDN_MSGTIMER_START(domid, IDNP_CFG, dp->dcfgphase,
			    idn_msg_waittime[IDNP_CFG], NULL);
		} else {
			idn_send_config(domid, phase + 1);

			if (dp->dcfgsnddone && dp->dcfgrcvdone) {
				/*
				 * Both sides of the exchange are done;
				 * move to the CON transition.  Must drop
				 * the domain lock before taking the SYNC
				 * lock to preserve lock ordering, then
				 * recheck the state under the locks.
				 */
				IDN_DUNLOCK(domid);
				IDN_SYNC_LOCK();
				IDN_DLOCK_EXCL(domid);
				if (dp->dstate == IDNDS_CONFIG) {
					dp->dxp = &xphase_con;
					IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
					bzero(xargs, sizeof (xargs));

					idn_xphase_transition(domid, NULL,
					    xargs);
				}
				IDN_SYNC_UNLOCK();
			}
		}
		return;
	}

	/*
	 * Not an ack - this is an actual CFG data message.
	 * Walk each parameter it carries.
	 */
	pnum = (int)cfg_subtype.info.num;
	phase = (int)cfg_subtype.info.phase;

	for (p = 0; p < pnum; p++) {
		int	board;
#ifdef DEBUG
		uint_t	val;
		char	*str;

		val = 0;
		str = NULL;
/* Record the item name/value for the debug trace printed below. */
#define	RCVCFG(s, v)	{ str = (s); val = (v); }
#else
#define	RCVCFG(s, v)	{}
#endif /* DEBUG */

		subtype	    = IDN_CFGPARAM_TYPE(cfg_subtype.param.p[p]);
		subtype_arg = IDN_CFGPARAM_ARG(cfg_subtype.param.p[p]);

		/*
		 * For each item: accept it only if we don't already
		 * have a value (first writer wins), and bump
		 * dncfgitems so the completeness checks below know
		 * how much has arrived.
		 */
		switch (subtype) {

		case IDNCFG_BARLAR:
			/* SMR base/limit PFNs live in global state. */
			IDN_GLOCK_EXCL();
			switch (subtype_arg) {

			case IDNCFGARG_BARLAR_BAR:
				if (idn.smr.rempfn == PFN_INVALID) {
					idn.smr.rempfn = (pfn_t)cfg_arg[p];
					dp->dncfgitems++;
					RCVCFG("BARLAR_BAR", cfg_arg[p]);
				}
				break;

			case IDNCFGARG_BARLAR_LAR:
				if (idn.smr.rempfnlim == PFN_INVALID) {
					idn.smr.rempfnlim = (pfn_t)cfg_arg[p];
					dp->dncfgitems++;
					RCVCFG("BARLAR_LAR", cfg_arg[p]);
				}
				break;

			default:
				/*
				 * NOTE(review): message reads "IDN 217:"
				 * while every other message in this file
				 * uses "IDN: nnn:" - likely a missing
				 * colon, but the string is left as-is.
				 */
				cmn_err(CE_WARN,
				    "IDN 217: unknown CFGARG type (%d) "
				    "from domain %d",
				    subtype_arg, domid);
				break;
			}
			IDN_GUNLOCK();
			break;

		case IDNCFG_MCADR:
			/* subtype_arg is the board number for this MC adr. */
			board = subtype_arg;
			if ((board >= 0) && (board < MAX_BOARDS) &&
			    (dp->dhw.dh_mcadr[board] == 0)) {
				dp->dhw.dh_mcadr[board] = cfg_arg[p];
				dp->dncfgitems++;
				RCVCFG("MCADR", cfg_arg[p]);
			}
			break;

		case IDNCFG_NMCADR:
			if (dp->dhw.dh_nmcadr == 0) {
				dp->dhw.dh_nmcadr = cfg_arg[p];
				dp->dncfgitems++;
				RCVCFG("NMCADR", cfg_arg[p]);
			}
			break;

		case IDNCFG_CPUSET:
			/*
			 * The 64-cpu mask arrives as two 32-bit halves,
			 * each OR'd into dcpuset as it shows up.
			 */
			switch (subtype_arg) {

			case IDNCFGARG_CPUSET_UPPER:
			{
				cpuset_t	tmpset;

				MAKE64_CPUMASK(tmpset, cfg_arg[p], 0);
				CPUSET_OR(dp->dcpuset, tmpset);
				dp->dncfgitems++;
				RCVCFG("CPUSET_UPPER", cfg_arg[p]);
				break;
			}
			case IDNCFGARG_CPUSET_LOWER:
			{
				cpuset_t	tmpset;

				MAKE64_CPUMASK(tmpset, 0, cfg_arg[p]);
				CPUSET_OR(dp->dcpuset, tmpset);
				dp->dncfgitems++;
				RCVCFG("CPUSET_LOWER", cfg_arg[p]);
				break;
			}
			default:
				ASSERT(0);
				break;
			}
			break;

		case IDNCFG_NETID:
			if (dp->dnetid == (ushort_t)-1) {
				dp->dnetid = (ushort_t)cfg_arg[p];
				dp->dncfgitems++;
				RCVCFG("NETID", cfg_arg[p]);
			}
			break;

		case IDNCFG_BOARDSET:
			if ((dp->dhw.dh_boardset & cfg_arg[p])
			    == dp->dhw.dh_boardset) {
				/*
				 * Boardset better include what we
				 * already know about.
				 */
				dp->dhw.dh_boardset = cfg_arg[p];
				dp->dncfgitems++;
				RCVCFG("BOARDSET", cfg_arg[p]);
			}
			break;

		case IDNCFG_SIZE:
			switch (subtype_arg) {

			case IDNCFGARG_SIZE_MTU:
				if (dp->dmtu == 0) {
					dp->dmtu = cfg_arg[p];
					dp->dncfgitems++;
					RCVCFG("MTU", cfg_arg[p]);
				}
				break;

			case IDNCFGARG_SIZE_BUF:
				if (dp->dbufsize == 0) {
					dp->dbufsize = cfg_arg[p];
					dp->dncfgitems++;
					RCVCFG("BUFSIZE", cfg_arg[p]);
				}
				break;

			case IDNCFGARG_SIZE_SLAB:
				if (dp->dslabsize == 0) {
					dp->dslabsize = (short)cfg_arg[p];
					dp->dncfgitems++;
					RCVCFG("SLABSIZE", cfg_arg[p]);
				}
				break;

			case IDNCFGARG_SIZE_NWR:
				if (dp->dnwrsize == 0) {
					dp->dnwrsize = (short)cfg_arg[p];
					dp->dncfgitems++;
					RCVCFG("NWRSIZE", cfg_arg[p]);
				}
				break;

			default:
				ASSERT(0);
				break;
			}
			break;

		case IDNCFG_DATAMBOX:
			switch (subtype_arg) {

			case IDNCFGARG_DATAMBOX_TABLE:
				if (ldp->dmbox.m_tbl ||
				    !dp->dvote.v.master ||
				    !VALID_NWROFFSET(cfg_arg[p], 4)) {
					/*
					 * Only a master should be
					 * sending us a datambox table.
					 */
					break;
				}
				IDN_DLOCK_EXCL(idn.localid);
				ldp->dmbox.m_tbl = (idn_mboxtbl_t *)
				    IDN_OFFSET2ADDR(cfg_arg[p]);
				IDN_DUNLOCK(idn.localid);
				dp->dncfgitems++;
				RCVCFG("DATAMBOX.TABLE", cfg_arg[p]);
				break;

			case IDNCFGARG_DATAMBOX_DOMAIN:
				if (dp->dmbox.m_send->mm_smr_mboxp ||
				    !VALID_NWROFFSET(cfg_arg[p], 4))
					break;
				mbtp = (idn_mboxtbl_t *)
				    IDN_OFFSET2ADDR(cfg_arg[p]);
				mmp = dp->dmbox.m_send;
				/*
				 * Point each per-net send mailbox at its
				 * slot in the remote-supplied table.
				 */
				for (c = 0; c < IDN_MAX_NETS; c++) {

					mutex_enter(&mmp[c].mm_mutex);
					mmp[c].mm_smr_mboxp = mbtp;
					mutex_exit(&mmp[c].mm_mutex);

					IDN_MBOXTBL_PTR_INC(mbtp);
				}
				/*
				 * NOTE(review): c == IDN_MAX_NETS here,
				 * so this only fires if IDN_MAX_NETS
				 * were zero - effectively dead code.
				 */
				if (c <= 0)
					break;
				dp->dncfgitems++;
				RCVCFG("DATAMBOX.DOMAIN", cfg_arg[p]);
				break;

			case IDNCFGARG_DATAMBOX_INDEX:
				if (!ldp->dvote.v.master ||
				    dp->dmbox.m_send->mm_smr_mboxp) {
					/*
					 * If I'm not the master then
					 * I can't handle processing a
					 * mailbox index.
					 * OR, if I already have the send
					 * mailbox, I'm done with this
					 * config item.
					 */
					break;
				}
				ASSERT(dp->dmbox.m_tbl);
				index = (int)cfg_arg[p];
				/*
				 * The given index is the local domain's
				 * index into the remote domain's mailbox
				 * table that contains the mailbox that
				 * remote domain wants the local domain to
				 * use as the send mailbox for messages
				 * destined for the remote domain.
				 * I.e. from the remote domain's
				 *	perspective, this is his receive
				 *	mailbox.
				 */
				mbtp = IDN_MBOXTBL_PTR(dp->dmbox.m_tbl, index);
				mmp = dp->dmbox.m_send;
				for (c = 0; c < IDN_MAX_NETS; c++) {

					mutex_enter(&mmp[c].mm_mutex);
					mmp[c].mm_smr_mboxp = mbtp;
					mutex_exit(&mmp[c].mm_mutex);

					IDN_MBOXTBL_PTR_INC(mbtp);
				}
				/* NOTE(review): dead check, see above. */
				if (c <= 0)
					break;
				dp->dncfgitems++;
				RCVCFG("DATAMBOX.INDEX", cfg_arg[p]);
				break;

			default:
				ASSERT(0);
				break;
			}
			break;

		case IDNCFG_DATASVR:
			switch (subtype_arg) {

			case IDNCFGARG_DATASVR_MAXNETS:
				if (dp->dmaxnets)
					break;
				dp->dmaxnets = (uint_t)(cfg_arg[p] & 0x3f);
				dp->dncfgitems++;
				RCVCFG("DATASVR.MAXNETS", cfg_arg[p]);
				break;

			case IDNCFGARG_DATASVR_MBXPERNET:
				if (dp->dmboxpernet)
					break;
				dp->dmboxpernet = (uint_t)(cfg_arg[p] & 0x1ff);
				dp->dncfgitems++;
				RCVCFG("DATASVR.MBXPERNET", cfg_arg[p]);
				break;

			default:
				ASSERT(0);
				break;
			}
			break;

		case IDNCFG_OPTIONS:
			switch (subtype_arg) {

			case IDNCFGARG_CHECKSUM:
				if (dp->dcksum)
					break;
				/* dcksum encodes: 1 = off, 2 = on */
				if ((cfg_arg[p] & 0xff) == 0)
					dp->dcksum = 1;		/* off */
				else
					dp->dcksum = 2;		/* on */
				dp->dncfgitems++;
				RCVCFG("OPTIONS.CHECKSUM", cfg_arg[p]);
				break;

			default:
				ASSERT(0);
				break;
			}

		/*
		 * NOTE(review): no break above - IDNCFG_OPTIONS falls
		 * through into default.  Harmless since default only
		 * breaks, but a break was likely intended.
		 */
		default:
			break;
		}
#ifdef DEBUG
		PR_PROTO("%s:%d: received %s (0x%x)\n",
		    proc, domid, str ? str : "<empty>", val);
#endif /* DEBUG */
	}

	/*
	 * Acknowledge receipt of this CFG message, echoing the phase.
	 */
	mt.mt_mtype = IDNP_ACK;
	mt.mt_atype = IDNP_CFG;
	mt.mt_cookie = mtp->mt_cookie;
	CLR_XARGS(cfg_arg);
	SET_XARGS_CFG_PHASE(cfg_arg, phase);
	idn_send_acknack(domid, &mt, cfg_arg);

	rv_expected = rv_actual = 0;

	if (dp->dvote.v.master == 0) {
		/*
		 * Remote domain is a slave, check if we've received
		 * all that we were expecting, and if so transition to
		 * the next state.
		 */
		rv = idn_check_slave_config(domid, &rv_expected, &rv_actual);
	} else {
		/*
		 * Remote domain is a master, check if this slave has
		 * received all that it was expecting, and if so
		 * transition to the next state.
		 */
		rv = idn_check_master_config(domid, &rv_expected, &rv_actual);
	}

	switch (rv) {
	case CFG_DONE:
		/*
		 * All config info received that was expected, wrap up.
		 */
		if (!idn_recv_config_done(domid) && dp->dvote.v.master) {
			IDN_DLOCK_EXCL(idn.localid);
			ldp->dvote.v.connected = 1;
			IDN_DUNLOCK(idn.localid);
		}
		break;

	case CFG_CONTINUE:
		/*
		 * If we're not done sending our own config, then
		 * there's no need to set a timer since one will
		 * automatically be set when we send a config
		 * message waiting for an acknowledgement.
		 */
		if (dp->dcfgsnddone) {
			/*
			 * We haven't yet received all the config
			 * information we were expecting.  Need to
			 * restart CFG timer if we've sent everything..
			 */
			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
			    idn_msg_waittime[IDNP_CFG], NULL);
		}
		break;

	case CFG_FATAL:
		/*
		 * Fatal error occurred during config exchange.
		 * We need to shutdown connection in this
		 * case, so initiate a (non-relink) FIN.
		 * so let's get the show on the road.
		 */
		IDN_DUNLOCK(domid);
		IDN_SYNC_LOCK();
		IDN_DLOCK_EXCL(domid);
		/*
		 * If the state has changed from CONFIG
		 * then somebody else has taken over
		 * control of this domain so we can just
		 * bail out.
		 */
		if (dp->dstate == IDNDS_CONFIG) {
			INIT_IDNKERR(&idnerr);
			SET_IDNKERR_ERRNO(&idnerr, EPROTO);
			SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_FATAL);
			SET_IDNKERR_PARAM0(&idnerr, domid);
			idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
			/*
			 * Keep this guy around so we can try again.
			 */
			DOMAINSET_ADD(idn.domset.ds_relink, domid);
			IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
			    idn.domset.ds_relink);
			idn_disconnect(domid, IDNFIN_NORMAL,
			    IDNFIN_ARG_CFGERR_FATAL,
			    IDNFIN_SYNC_NO);
		}
		IDN_SYNC_UNLOCK();
		break;

	default:	/* parameter conflict */
		IDN_DUNLOCK(domid);
		IDN_SYNC_LOCK();
		IDN_DLOCK_EXCL(domid);
		if (dp->dstate != IDNDS_CONFIG) {
			/*
			 * Hmmm...changed in the short period
			 * we had dropped the lock, oh well.
			 */
			IDN_SYNC_UNLOCK();
			break;
		}
		/* Count how many CFG_ERR_* bits are set in rv. */
		c = 0;
		for (p = 0; p < CFG_MAX_ERRORS; p++)
			if (rv & (1 << p))
				c++;
		INIT_IDNKERR(&idnerr);
		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
		SET_IDNKERR_PARAM0(&idnerr, domid);
		if (c > 1) {
			SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_MULTIPLE);
			SET_IDNKERR_PARAM1(&idnerr, c);
		} else {
			SET_IDNKERR_IDNERR(&idnerr, CFGERR2IDNKERR(rv));
			SET_IDNKERR_PARAM1(&idnerr, rv_expected);
			SET_IDNKERR_PARAM2(&idnerr, rv_actual);
		}
		/*
		 * Any parameter conflicts are grounds for dismissal.
		 */
		if (idn.domset.ds_connected == 0) {
			domainset_t	domset;
			/*
			 * We have no other connections yet.
			 * We must blow out of here completely
			 * unless we have relinkers left from
			 * a RECONFIG.
			 */
			IDN_GLOCK_EXCL();
			domset = ~idn.domset.ds_relink;
			if (idn.domset.ds_relink == 0) {
				IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
			}
			domset &= ~idn.domset.ds_hitlist;
			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
			IDN_GUNLOCK();
			IDN_DUNLOCK(domid);

			DOMAINSET_DEL(domset, idn.localid);

			idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);

			PR_HITLIST("%s:%d: unlink_domainset(%x) due to "
			    "CFG error (relink=%x, hitlist=%x)\n",
			    proc, domid, domset, idn.domset.ds_relink,
			    idn.domset.ds_hitlist);

			idn_unlink_domainset(domset, IDNFIN_NORMAL,
			    CFGERR2FINARG(rv),
			    IDNFIN_OPT_UNLINK,
			    BOARDSET_ALL);
			IDN_SYNC_UNLOCK();
			IDN_DLOCK_EXCL(domid);
		} else {
			PR_HITLIST("%s:%d: idn_disconnect(%d) due to CFG "
			    "error (conn=%x, relink=%x, hitlist=%x)\n",
			    proc, domid, domid, idn.domset.ds_connected,
			    idn.domset.ds_relink, idn.domset.ds_hitlist);
			/*
			 * If we have other connections then
			 * we're only going to blow away this
			 * single connection.
			 */
			idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);

			DOMAINSET_DEL(idn.domset.ds_relink, domid);
			idn_disconnect(domid, IDNFIN_NORMAL,
			    CFGERR2FINARG(rv), IDNFIN_SYNC_NO);
			IDN_SYNC_UNLOCK();
		}
		break;
	}
}
6923 
6924 /*
6925  * Called by master or slave which expects exactly the following
6926  * with respect to config info received from a SLAVE:
6927  * 	IDNCFG_CPUSET
6928  *	IDNCFG_NETID
6929  *	IDNCFG_BOARDSET
6930  *	IDNCFG_SIZE (MTU, BUF, SLAB, NWR)
6931  *	IDNCFG_DATAMBOX (DOMAIN or INDEX if caller is master)
6932  *	IDNCFG_DATASVR (MAXNETS, MBXPERNET)
6933  *	IDNCFG_OPTIONS (CHECKSUM)
6934  */
static uint_t
idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
{
	uint_t		rv = 0;
	idn_domain_t	*ldp, *dp;
	procname_t	proc = "idn_check_slave_config";

	dp = &idn_domain[domid];
	ldp = &idn_domain[idn.localid];

	ASSERT(domid != idn.localid);
	ASSERT(IDN_DLOCK_IS_EXCL(domid));
	ASSERT(dp->dstate == IDNDS_CONFIG);

	PR_PROTO("%s:%d: number received %d, number expected %d\n",
	    proc, domid, (int)dp->dncfgitems, IDN_SLAVE_NCFGITEMS);

	/* Haven't seen the full complement of items yet; keep waiting. */
	if ((int)dp->dncfgitems < IDN_SLAVE_NCFGITEMS)
		return (CFG_CONTINUE);

	/*
	 * Item count is satisfied - now verify that every required
	 * field was actually filled in (none left at its "unset"
	 * sentinel value).
	 */
	if ((dp->dnetid == (ushort_t)-1) ||
	    CPUSET_ISNULL(dp->dcpuset) ||
	    (dp->dhw.dh_boardset == 0) ||
	    (dp->dmbox.m_send->mm_smr_mboxp == NULL) ||
	    (dp->dmaxnets == 0) ||
	    (dp->dmboxpernet == 0) ||
	    (dp->dcksum == 0) ||
	    (dp->dmtu == 0) ||
	    (dp->dbufsize == 0) ||
	    (dp->dslabsize == 0) ||
	    (dp->dnwrsize == 0)) {
		/*
		 * We received our IDN_SLAVE_NCFGITEMS config items,
		 * but not all what we were expecting!  Gotta nack and
		 * close connection.
		 */
		cmn_err(CE_WARN,
		    "IDN: 218: missing some required config items from "
		    "domain %d", domid);

		rv = CFG_FATAL;
		goto done;
	}

	/*
	 * All items present; cross-check each against our own values.
	 * Errors accumulate as CFG_ERR_* bits in rv; *exp/*act hold the
	 * expected/actual pair for the LAST failing check only.
	 */
	if (!valid_mtu(dp->dmtu)) {
		cmn_err(CE_WARN,
		    "IDN: 219: remote domain %d MTU (%d) invalid "
		    "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);

		*exp = (uint_t)ldp->dmtu;
		*act = (uint_t)dp->dmtu;
		rv |= CFG_ERR_MTU;
	}
	if (!valid_bufsize(dp->dbufsize)) {
		cmn_err(CE_WARN,
		    "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
		    "(local.bufsize = %d)", dp->domid, dp->dbufsize,
		    ldp->dbufsize);

		*exp = (uint_t)ldp->dbufsize;
		*act = (uint_t)dp->dbufsize;
		rv |= CFG_ERR_BUF;
	}
	if (!valid_slabsize((int)dp->dslabsize)) {
		cmn_err(CE_WARN,
		    "IDN: 221: remote domain %d SLABSIZE (%d) invalid "
		    "(local.slabsize = %d)",
		    dp->domid, dp->dslabsize, ldp->dslabsize);

		*exp = (uint_t)ldp->dslabsize;
		*act = (uint_t)dp->dslabsize;
		rv |= CFG_ERR_SLAB;
	}
	if (!valid_nwrsize((int)dp->dnwrsize)) {
		cmn_err(CE_WARN,
		    "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
		    "(local.nwrsize = %d)",
		    dp->domid, dp->dnwrsize, ldp->dnwrsize);

		*exp = (uint_t)ldp->dnwrsize;
		*act = (uint_t)dp->dnwrsize;
		rv |= CFG_ERR_NWR;
	}
	if ((int)dp->dmaxnets != IDN_MAX_NETS) {
		cmn_err(CE_WARN,
		    "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
		    "(local.maxnets = %d)",
		    dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);

		*exp = (uint_t)IDN_MAX_NETS;
		*act = (uint_t)dp->dmaxnets;
		rv |= CFG_ERR_NETS;
	}
	if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
		cmn_err(CE_WARN,
		    "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
		    "invalid (local.mboxpernet = %d)",
		    dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);

		*exp = (uint_t)IDN_MBOX_PER_NET;
		*act = (uint_t)dp->dmboxpernet;
		rv |= CFG_ERR_MBOX;
	}
	/* dcksum is stored biased by one (1 = off, 2 = on); unbias here. */
	if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
		cmn_err(CE_WARN,
		    "IDN: 226: remote domain %d CHECKSUM flag (%d) "
		    "mismatches local domain's (%d)",
		    dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);

		*exp = (uint_t)IDN_CHECKSUM;
		*act = (uint_t)(dp->dcksum - 1);
		rv |= CFG_ERR_CKSUM;
	}

done:

	return (rv ? rv : CFG_DONE);
}
7053 
7054 /*
7055  * Called by slave ONLY which expects exactly the following
7056  * config info from the MASTER:
7057  *	IDNCFG_BARLAR
7058  *	IDNCFG_MCADR
7059  *	IDNCFG_NMCADR
7060  * 	IDNCFG_CPUSET
7061  *	IDNCFG_NETID
7062  *	IDNCFG_BOARDSET
7063  *	IDNCFG_SIZE (MTU, BUF, SLAB, NWR)
7064  *	IDNCFG_DATAMBOX (TABLE, DOMAIN)
7065  *	IDNCFG_DATASVR (MAXNETS, MBXPERNET)
7066  *	IDNCFG_OPTIONS (CHECKSUM)
7067  */
static uint_t
idn_check_master_config(int domid, uint_t *exp, uint_t *act)
{
	uint_t		rv = 0;
	int		nmcadr;
	int		total_expitems;
	int		p, m, err;
	idn_domain_t	*dp;
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	procname_t	proc = "idn_check_master_config";

	dp = &idn_domain[domid];

	/* Only a slave validates a master's config. */
	ASSERT(IDN_GET_MASTERID() != idn.localid);
	ASSERT(domid != idn.localid);
	ASSERT(IDN_DLOCK_IS_EXCL(domid));
	ASSERT(dp->dstate == IDNDS_CONFIG);

	PR_PROTO("%s:%d: number received %d, minimum number expected %d\n",
	    proc, domid, (int)dp->dncfgitems, IDN_MASTER_NCFGITEMS);

	if ((int)dp->dncfgitems < IDN_MASTER_NCFGITEMS)
		return (CFG_CONTINUE);

	/*
	 * We have at least IDN_MASTER_NCFGITEMS items which
	 * means we have at least one MCADR.  Need to make sure
	 * we have all that we're expecting, NMCADR.
	 */
	total_expitems = IDN_MASTER_NCFGITEMS + dp->dhw.dh_nmcadr - 1;
	if ((dp->dhw.dh_nmcadr == 0) ||
	    ((int)dp->dncfgitems < total_expitems)) {
		/*
		 * We have not yet received all the MCADRs
		 * we're expecting.
		 */
		PR_PROTO("%s:%d: haven't received all MCADRs yet.\n",
		    proc, domid);
		return (CFG_CONTINUE);
	}

	/* Count how many boards actually reported an MC address. */
	nmcadr = 0;
	for (p = 0; p < MAX_BOARDS; p++)
		if (dp->dhw.dh_mcadr[p] != 0)
			nmcadr++;

	/*
	 * Global lock (shared) protects the idn.smr fields read below.
	 * It is released on both the error path and the normal path.
	 */
	IDN_GLOCK_SHARED();
	if ((idn.smr.rempfn == PFN_INVALID) ||
	    (idn.smr.rempfnlim == PFN_INVALID) ||
	    (dp->dnetid == (ushort_t)-1) ||
	    CPUSET_ISNULL(dp->dcpuset) ||
	    (dp->dhw.dh_boardset == 0) ||
	    (nmcadr != dp->dhw.dh_nmcadr) ||
	    (dp->dmbox.m_send->mm_smr_mboxp == NULL) ||
	    (ldp->dmbox.m_tbl == NULL) ||
	    (dp->dmaxnets == 0) ||
	    (dp->dmboxpernet == 0) ||
	    (dp->dcksum == 0) ||
	    (dp->dmtu == 0) ||
	    (dp->dbufsize == 0) ||
	    (dp->dnwrsize == 0)) {

		IDN_GUNLOCK();
		/*
		 * We received all of our config items, but not
		 * all what we were expecting!  Gotta reset and
		 * close connection.
		 */
		cmn_err(CE_WARN,
		    "IDN: 227: missing some required config items from "
		    "domain %d", domid);

		rv = CFG_FATAL;
		goto done;
	}
	if ((idn.smr.rempfnlim - idn.smr.rempfn) > btop(MB2B(IDN_SMR_SIZE))) {
		/*
		 * The master's SMR region is larger than
		 * mine!  This means that this domain may
		 * receive I/O buffers which are out of the
		 * range of this local domain's SMR virtual
		 * address space.  The master SMR has to be
		 * no larger than the local SMR in order to
		 * guarantee enough local virtual addresses
		 * to see all of the SMR space.
		 * XXX - Possibly add negotiating SMR size.
		 *	 Try to create a new virtual mapping.
		 *	 Could let domains negotiate SMR size.
		 *	 Winning size would have to be smallest
		 *	 in DC.  If so, how to handle incoming
		 *	 domains with even smaller SMRs?
		 *	 - Could either disallow connection
		 *	 - Could reconfigure to use smaller SMR.
		 */
		cmn_err(CE_WARN,
		    "IDN: 228: master's SMR (%ld) larger than "
		    "local's SMR (%ld)",
		    idn.smr.rempfnlim - idn.smr.rempfn,
		    btop(MB2B(IDN_SMR_SIZE)));

		*exp = (uint_t)IDN_SMR_SIZE;
		*act = (uint_t)B2MB(ptob(idn.smr.rempfnlim - idn.smr.rempfn));
		rv |= CFG_ERR_SMR;
	}
	IDN_GUNLOCK();

	/*
	 * Cross-check each parameter against our own values.  Errors
	 * accumulate as CFG_ERR_* bits; *exp/*act keep the pair from
	 * the LAST failing check only.
	 */
	if (!valid_mtu(dp->dmtu)) {
		cmn_err(CE_WARN,
		    "IDN: 219: remote domain %d MTU (%d) invalid "
		    "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);

		*exp = (uint_t)ldp->dmtu;
		*act = (uint_t)dp->dmtu;
		rv |= CFG_ERR_MTU;
	}
	if (!valid_bufsize(dp->dbufsize)) {
		cmn_err(CE_WARN,
		    "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
		    "(local.bufsize = %d)", dp->domid, dp->dbufsize,
		    ldp->dbufsize);

		*exp = (uint_t)ldp->dbufsize;
		*act = (uint_t)dp->dbufsize;
		rv |= CFG_ERR_BUF;
	}
	if (!valid_nwrsize((int)dp->dnwrsize)) {
		cmn_err(CE_WARN,
		    "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
		    "(local.nwrsize = %d)",
		    dp->domid, dp->dnwrsize, ldp->dnwrsize);

		*exp = (uint_t)ldp->dnwrsize;
		*act = (uint_t)dp->dnwrsize;
		rv |= CFG_ERR_NWR;
	}
	if ((int)dp->dmaxnets != IDN_MAX_NETS) {
		cmn_err(CE_WARN,
		    "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
		    "(local.maxnets = %d)",
		    dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);

		*exp = (uint_t)IDN_MAX_NETS;
		*act = (uint_t)dp->dmaxnets;
		rv |= CFG_ERR_NETS;
	}
	if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
		cmn_err(CE_WARN,
		    "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
		    "invalid (local.mboxpernet = %d)",
		    dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);

		*exp = (uint_t)IDN_MBOX_PER_NET;
		*act = (uint_t)dp->dmboxpernet;
		rv |= CFG_ERR_MBOX;
	}
	/* dcksum is stored biased by one (1 = off, 2 = on); unbias here. */
	if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
		cmn_err(CE_WARN,
		    "IDN: 226: remote domain %d CHECKSUM flag (%d) "
		    "mismatches local domain's (%d)",
		    dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);

		*exp = (uint_t)IDN_CHECKSUM;
		*act = (uint_t)(dp->dcksum - 1);
		rv |= CFG_ERR_CKSUM;
	}
	/*
	 * Recount MCADRs, this time also verifying that every MCADR
	 * belongs to a board in the reported boardset.
	 */
	nmcadr = 0;
	err = 0;
	for (m = 0; m < MAX_BOARDS; m++) {
		if (!BOARD_IN_SET(dp->dhw.dh_boardset, m) &&
		    dp->dhw.dh_mcadr[m]) {
			cmn_err(CE_WARN,
			    "IDN: 229: remote domain %d boardset (0x%x) "
			    "conflicts with MCADR(board %d) [0x%x]",
			    dp->domid, (uint_t)dp->dhw.dh_boardset, m,
			    dp->dhw.dh_mcadr[m]);
			err++;
		}
		if (dp->dhw.dh_mcadr[m])
			nmcadr++;
	}
	if (err) {
		*exp = 0;
		*act = err;
		rv |= CFG_ERR_MCADR;
	} else if (nmcadr != dp->dhw.dh_nmcadr) {
		cmn_err(CE_WARN,
		    "IDN: 230: remote domain %d reported number of "
		    "MCADRs (%d) mismatches received (%d)",
		    dp->domid, dp->dhw.dh_nmcadr, nmcadr);
		*exp = (uint_t)dp->dhw.dh_nmcadr;
		*act = (uint_t)nmcadr;
		rv |= CFG_ERR_NMCADR;
	}

done:

	return (rv ? rv : CFG_DONE);
}
7266 
/*
 * Final processing once all expected CFG messages from the given
 * domain have been received.  Tallies the remote domain's cpu/board
 * counts, validates them against existing IDN members, programs the
 * local hardware and, if our own CFG sends are also complete, kicks
 * off the CON handshake phase.
 *
 * Returns 0 on success, -1 if the domain had to be disconnected
 * (cpu/board conflict, invalid cpu-per-board config, or hardware
 * programming failure).  Entered and exited with the domain lock
 * held EXCL.
 */
static int
idn_recv_config_done(int domid)
{
	boardset_t		b_conflicts;
	cpuset_t		p_conflicts;
	register int		p, i;
	register idn_domain_t	*dp;
	idnsb_error_t		idnerr;
	procname_t		proc = "idn_recv_config_done";

	ASSERT(domid != IDN_NIL_DOMID);
	dp = &idn_domain[domid];
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	/*
	 * Well, we received all that we were expecting
	 * so stop any CFG timers we had going.
	 */
	IDN_MSGTIMER_STOP(domid, IDNP_CFG, 0);

	/*
	 * Count the cpus and boards the remote domain reported.
	 */
	dp->dncpus = 0;
	for (p = 0; p < NCPU; p++)
		if (CPU_IN_SET(dp->dcpuset, p))
			dp->dncpus++;
	dp->dhw.dh_nboards = 0;
	for (p = 0; p < MAX_BOARDS; p++)
		if (BOARD_IN_SET(dp->dhw.dh_boardset, p))
			dp->dhw.dh_nboards++;

	IDN_GLOCK_EXCL();
	/*
	 * Verify dcpuset and dhw.dh_boardset don't
	 * conflict with any existing DC member.
	 */
	b_conflicts = idn.dc_boardset & dp->dhw.dh_boardset;
	CPUSET_ZERO(p_conflicts);
	CPUSET_OR(p_conflicts, idn.dc_cpuset);
	CPUSET_AND(p_conflicts, dp->dcpuset);

	if (b_conflicts || !CPUSET_ISNULL(p_conflicts)) {
		if (b_conflicts) {
			cmn_err(CE_WARN,
			    "IDN: 231: domain %d boardset "
			    "(0x%x) conflicts with existing "
			    "IDN boardset (0x%x)",
			    domid, dp->dhw.dh_boardset,
			    b_conflicts);
		}
		if (!CPUSET_ISNULL(p_conflicts)) {
			cmn_err(CE_WARN,
			    "IDN: 232: domain %d cpuset "
			    "(0x%x.%0x) conflicts with existing "
			    "IDN cpuset (0x%x.%0x)", domid,
			    UPPER32_CPUMASK(dp->dcpuset),
			    LOWER32_CPUMASK(dp->dcpuset),
			    UPPER32_CPUMASK(p_conflicts),
			    LOWER32_CPUMASK(p_conflicts));
		}
		IDN_GUNLOCK();
		/*
		 * Need to disconnect and not retry with this guy.
		 * Drop the domain lock so the SYNC lock can be
		 * taken first (lock ordering), then reacquire.
		 */
		IDN_DUNLOCK(domid);
		IDN_SYNC_LOCK();
		DOMAINSET_DEL(idn.domset.ds_relink, domid);
		IDN_DLOCK_EXCL(domid);

		INIT_IDNKERR(&idnerr);
		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_FATAL);
		SET_IDNKERR_PARAM0(&idnerr, domid);
		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);

		idn_disconnect(domid, IDNFIN_FORCE_HARD,
		    IDNFIN_ARG_CFGERR_FATAL, IDNFIN_SYNC_NO);
		IDN_SYNC_UNLOCK();

		return (-1);
	}

	idn_mainmbox_reset(domid, dp->dmbox.m_send);
	idn_mainmbox_reset(domid, dp->dmbox.m_recv);

#ifdef IDNBUG_CPUPERBOARD
	/*
	 * We only allow connections to domains whose (mem) boards
	 * all have at least one cpu.  This is necessary so that
	 * we can program the CICs of that respective board.  This
	 * is primarily only a requirement if the remote domain
	 * is the master _and_ has the SMR in that particular board.
	 * To simplify the checking we simply restrict connections to
	 * domains that have at least one cpu on all boards that
	 * contain memory.
	 */
	if (!idn_cpu_per_board((void *)NULL, dp->dcpuset, &dp->dhw)) {
		cmn_err(CE_WARN,
		    "IDN: 233: domain %d missing CPU per "
		    "memory boardset (0x%x), CPU boardset (0x%x)",
		    domid, dp->dhw.dh_boardset,
		    cpuset2boardset(dp->dcpuset));

		IDN_GUNLOCK();
		/*
		 * Need to disconnect and not retry with this guy.
		 */
		IDN_DUNLOCK(domid);
		IDN_SYNC_LOCK();
		DOMAINSET_DEL(idn.domset.ds_relink, domid);
		IDN_DLOCK_EXCL(domid);

		INIT_IDNKERR(&idnerr);
		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CPU_CONFIG);
		SET_IDNKERR_PARAM0(&idnerr, domid);
		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);

		idn_disconnect(domid, IDNFIN_FORCE_HARD,
		    IDNFIN_ARG_CPUCFG, IDNFIN_SYNC_NO);
		IDN_SYNC_UNLOCK();

		return (-1);
	}
#endif /* IDNBUG_CPUPERBOARD */

	/*
	 * Checks passed - fold this domain's cpus/boards into the
	 * global sets of connected members.
	 */
	CPUSET_OR(idn.dc_cpuset, dp->dcpuset);
	idn.dc_boardset |= dp->dhw.dh_boardset;

	IDN_GUNLOCK();

	/*
	 * Set up the portmap for this domain.
	 */
	i = -1;
	for (p = 0; p < NCPU; p++) {
		BUMP_INDEX(dp->dcpuset, i);
		dp->dcpumap[p] = (uchar_t)i;
	}

	/*
	 * Got everything we need from the remote
	 * domain, now we can program hardware as needed.
	 */
	if (idn_program_hardware(domid) != 0) {
		domainset_t	domset;
		/*
		 * Yikes!  Failed to program hardware.
		 * Gotta bail.
		 */
		cmn_err(CE_WARN,
		    "IDN: 234: failed to program hardware for domain %d "
		    "(boardset = 0x%x)",
		    domid, dp->dhw.dh_boardset);

		IDN_DUNLOCK(domid);
		/*
		 * If we're having problems programming our
		 * hardware we better unlink completely from
		 * the IDN before things get really bad.
		 */
		IDN_SYNC_LOCK();
		IDN_GLOCK_EXCL();
		IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
		domset = DOMAINSET_ALL;
		DOMAINSET_DEL(domset, idn.localid);
		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
		IDN_GUNLOCK();

		INIT_IDNKERR(&idnerr);
		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_HW_ERROR);
		SET_IDNKERR_PARAM0(&idnerr, domid);
		idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);

		idn_unlink_domainset(domset, IDNFIN_NORMAL, IDNFIN_ARG_HWERR,
		    IDNFIN_OPT_UNLINK, BOARDSET_ALL);

		IDN_SYNC_UNLOCK();
		IDN_DLOCK_EXCL(domid);

		return (-1);
	}

	/*
	 * Now that hardware has been programmed we can
	 * remap the SMR into our local space, if necessary.
	 */
	IDN_GLOCK_EXCL();
	if (domid == IDN_GET_MASTERID()) {
		/*
		 * No need to worry about disabling the data
		 * server since at this stage there is only
		 * one and he doesn't go active until his
		 * mailbox (dmbox.m_recv->mm_smr_mboxp) is set up.
		 */
		smr_remap(&kas, idn.smr.vaddr, idn.smr.rempfn, IDN_SMR_SIZE);
	}
	IDN_GUNLOCK();

	/*
	 * There is no need to ACK the CFG messages since remote
	 * domain would not progress to the next state (CON_SENT)
	 * unless he has received everything.
	 */

	dp->dcfgrcvdone = 1;
	PR_PROTO("%s:%d: RECV config DONE\n", proc, domid);

	if (dp->dcfgsnddone) {
		idn_xdcargs_t	xargs;
		/*
		 * Well, we've received all that we were expecting,
		 * but we don't know if the remote domain has
		 * received all that it was expecting from us,
		 * although we know we transferred everything
		 * so let's get the show on the road.
		 */
		IDN_DUNLOCK(domid);
		IDN_SYNC_LOCK();
		IDN_DLOCK_EXCL(domid);
		/*
		 * If the state has changed from CONFIG
		 * then somebody else has taken over
		 * control of this domain so we can just
		 * bail out.
		 */
		if (dp->dstate == IDNDS_CONFIG) {
			dp->dxp = &xphase_con;
			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
			bzero(xargs, sizeof (xargs));

			idn_xphase_transition(domid, NULL, xargs);
		}
		IDN_SYNC_UNLOCK();
	}

	return (0);
}
7504 
/*
 * Validate the SMR mailbox tables negotiated during the CFG exchange
 * with the given domain.  For each channel this checks the mailbox
 * header cookie/checksum and the active/ready pointer offsets, first
 * for our receive mailboxes (assigned out of the master's mboxtbl)
 * and then for our send mailboxes, converting the validated SMR
 * offsets into virtual addresses.
 *
 * Returns 0 if all mailboxes check out, -1 on any corruption (the
 * caller is expected to force a relink).  Entered with the remote
 * domain's lock held EXCL.
 */
static int
idn_verify_config_mbox(int domid)
{
	idn_domain_t	*ldp, *dp;
	idn_mainmbox_t	*mmp;
	idn_mboxtbl_t	*mtp;
	int		c, rv = 0;
	uint_t		activeptr, readyptr;
	ushort_t	mbox_csum;

	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	dp = &idn_domain[domid];
	ldp = &idn_domain[idn.localid];

	/*
	 * The master will have assigned us the dmbox.m_tbl
	 * from which we assign our receive mailboxes.
	 * The first (0) entry contains the cookie used
	 * for verification.
	 */
	IDN_DLOCK_SHARED(idn.localid);
	/*
	 * Now that we have an assigned mboxtbl from the
	 * master, we can determine which receive mailbox
	 * we indirectly assigned to him at the time we
	 * sent him his MBOX_INDEX.  Prep it, however note
	 * that the master will have not been able to
	 * validate it because of the chicken 'n egg
	 * problem between a master and slave.  Thus we
	 * need to reset the cookie after the prep.
	 */
	mmp = dp->dmbox.m_recv;
	mtp = IDN_MBOXTBL_PTR(ldp->dmbox.m_tbl, domid);
	for (c = 0; c < IDN_MAX_NETS; c++) {
		mutex_enter(&mmp[c].mm_mutex);
		ASSERT(!mmp[c].mm_smr_mboxp);

		mmp[c].mm_smr_mboxp = mtp;
		mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
		if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
			cmn_err(CE_WARN,
			    "IDN: 235: [recv] mailbox (domain %d, "
			    "channel %d) SMR CORRUPTED - RELINK",
			    domid, c);
			cmn_err(CE_CONT,
			    "IDN: 235: [recv] expected (cookie 0x%x, "
			    "cksum 0x%x) actual (cookie 0x%x, "
			    "cksum 0x%x)\n",
			    IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
			    (int)mtp->mt_header.mh_cksum,
			    IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
			    (int)mbox_csum);
			mutex_exit(&mmp[c].mm_mutex);
			rv = -1;
			break;
		}
		activeptr = mtp->mt_header.mh_svr_active_ptr;
		readyptr = mtp->mt_header.mh_svr_ready_ptr;
		/*
		 * Verify pointers are valid.
		 */
		if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
		    !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
			cmn_err(CE_WARN,
			    "IDN: 235: [recv] mailbox (domain %d, "
			    "channel %d) SMR CORRUPTED - RELINK",
			    domid, c);
			cmn_err(CE_CONT,
			    "IDN: 235: [recv] activeptr (0x%x), "
			    "readyptr (0x%x)\n",
			    activeptr, readyptr);
			mutex_exit(&mmp[c].mm_mutex);
			rv = -1;
			break;
		}
		/* Translate validated SMR offsets to virtual addresses. */
		mmp[c].mm_smr_activep =	(ushort_t *)IDN_OFFSET2ADDR(activeptr);
		mmp[c].mm_smr_readyp =	(ushort_t *)IDN_OFFSET2ADDR(readyptr);
		mutex_exit(&mmp[c].mm_mutex);
		IDN_MBOXTBL_PTR_INC(mtp);
	}

	IDN_DUNLOCK(idn.localid);

	if (rv)
		return (rv);

	/*
	 * Now we need to translate SMR offsets for send mailboxes
	 * to actual virtual addresses.
	 */
	mmp = dp->dmbox.m_send;
	for (c = 0; c < IDN_MAX_NETS; mmp++, c++) {
		mutex_enter(&mmp->mm_mutex);
		/* Send mailboxes must have been assigned already. */
		if ((mtp = mmp->mm_smr_mboxp) == NULL) {
			mutex_exit(&mmp->mm_mutex);
			rv = -1;
			break;
		}

		mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);

		if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
			cmn_err(CE_WARN,
			    "IDN: 235: [send] mailbox (domain %d, "
			    "channel %d) SMR CORRUPTED - RELINK",
			    domid, c);
			cmn_err(CE_CONT,
			    "IDN: 235: [send] expected (cookie 0x%x, "
			    "cksum 0x%x) actual (cookie 0x%x, "
			    "cksum 0x%x)\n",
			    IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
			    (int)mtp->mt_header.mh_cksum,
			    IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
			    (int)mbox_csum);
			mutex_exit(&mmp->mm_mutex);
			rv = -1;
			break;
		}
		activeptr = mtp->mt_header.mh_svr_active_ptr;
		readyptr = mtp->mt_header.mh_svr_ready_ptr;
		/*
		 * Paranoid check.
		 */
		if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
		    !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
			cmn_err(CE_WARN,
			    "IDN: 235: [send] mailbox (domain %d, "
			    "channel %d) SMR CORRUPTED - RELINK",
			    domid, c);
			cmn_err(CE_CONT,
			    "IDN: 235: [send] activeptr (0x%x), "
			    "readyptr (0x%x)\n",
			    activeptr, readyptr);
			mutex_exit(&mmp->mm_mutex);
			rv = -1;
			break;
		}
		mmp->mm_smr_activep = (ushort_t *)IDN_OFFSET2ADDR(activeptr);
		mmp->mm_smr_readyp = (ushort_t *)IDN_OFFSET2ADDR(readyptr);
		idn_reset_mboxtbl(mtp);
		mutex_exit(&mmp->mm_mutex);
		/*
		 * NOTE(review): mtp is reloaded from mm_smr_mboxp at the
		 * top of each iteration, so this increment appears to
		 * have no effect in this loop - confirm before removing.
		 */
		IDN_MBOXTBL_PTR_INC(mtp);
	}

	return (rv);
}
7652 
7653 /*
7654  * The BUFSIZEs between domains have to be equal so that slave buffers
7655  * and the master's slabpool are consistent.
7656  * The MTUs between domains have to be equal so they can transfer
7657  * packets consistently without possible data truncation.
7658  *
7659  * ZZZ - Perhaps these could be negotiated?
7660  */
7661 static int
7662 valid_mtu(uint_t mtu)
7663 {
7664 	return ((mtu == idn_domain[idn.localid].dmtu) && mtu);
7665 }
7666 
7667 static int
7668 valid_bufsize(uint_t bufsize)
7669 {
7670 	return ((bufsize == idn_domain[idn.localid].dbufsize) && bufsize);
7671 }
7672 
7673 static int
7674 valid_slabsize(int slabsize)
7675 {
7676 	return ((slabsize == idn_domain[idn.localid].dslabsize) && slabsize);
7677 }
7678 
7679 static int
7680 valid_nwrsize(int nwrsize)
7681 {
7682 	return ((nwrsize == idn_domain[idn.localid].dnwrsize) && nwrsize);
7683 }
7684 
/*
 * Program the local hardware (via idnxf_shmem_add) for a link to the
 * given remote domain.  If the remote is a slave only the CIC sm_mask
 * needs programming; if it is the master, the sm_bar/sm_lar and PCs
 * are programmed as well.  A local master's first hardware link also
 * programs its own bar/lar (guarded by the idn.first_hwlink lock).
 *
 * Returns 0 on success (domain added to ds_hwlinked), or the nonzero
 * result of idnxf_shmem_add on failure, after backing out the
 * first-link/first-master bookkeeping.  Entered with the domain lock
 * held EXCL; domain must be in CONFIG state.
 */
static int
idn_program_hardware(int domid)
{
	int		rv, is_master;
	idn_domain_t	*dp;
	uint_t		*mcadrp;
	pfn_t		rem_pfn, rem_pfnlimit;
	procname_t	proc = "idn_program_hardware";

	PR_PROTO("%s:%d: program hw in domain %d w.r.t remote domain %d\n",
	    proc, domid, idn.localid, domid);

	dp = &idn_domain[domid];

	ASSERT(domid != idn.localid);
	ASSERT(IDN_DLOCK_IS_EXCL(domid));
	ASSERT(dp->dstate == IDNDS_CONFIG);

	IDN_GLOCK_EXCL();

	/* Already programmed for this domain - nothing to do. */
	if (DOMAIN_IN_SET(idn.domset.ds_hwlinked, domid)) {
		IDN_GUNLOCK();
		return (0);
	}

	DOMAINSET_ADD(idn.domset.ds_flush, domid);
	CHECKPOINT_OPENED(IDNSB_CHKPT_CACHE, dp->dhw.dh_boardset, 1);

	if (domid != IDN_GET_MASTERID()) {
		/*
		 * If the remote domain is a slave, then
		 * all we have to program is the CIC sm_mask.
		 */
		is_master = 0;
		if ((idn.localid == IDN_GET_MASTERID()) &&
		    lock_try(&idn.first_hwlink)) {
			/*
			 * This is our first HW link and I'm the
			 * master, which means we need to program
			 * our local bar/lar.
			 */
			ASSERT(idn.first_hwmasterid == (short)IDN_NIL_DOMID);
			idn.first_hwmasterid = (short)idn.localid;
			rem_pfn = idn.smr.locpfn;
			rem_pfnlimit = idn.smr.locpfn +
			    btop(MB2B(IDN_SMR_SIZE));
		} else {
			/*
			 * Otherwise, just a slave linking to
			 * another slave.  No bar/lar updating
			 * necessary.
			 */
			rem_pfn = rem_pfnlimit = PFN_INVALID;
		}
		mcadrp = NULL;
	} else {
		/*
		 * If the remote domain is a master, then
		 * we need to program the CIC sm_mask/sm_bar/sm_lar,
		 * and PC's.
		 */
		is_master = 1;
		rem_pfn = idn.smr.rempfn;
		rem_pfnlimit = idn.smr.rempfnlim;
		mcadrp = dp->dhw.dh_mcadr;
		ASSERT(idn.first_hwmasterid == (short)IDN_NIL_DOMID);
		idn.first_hwmasterid = (short)domid;
	}

	PR_PROTO("%s:%d: ADD bset (0x%x)\n", proc, domid, dp->dhw.dh_boardset);

	rv = idnxf_shmem_add(is_master, dp->dhw.dh_boardset,
	    rem_pfn, rem_pfnlimit, mcadrp);

	if (rv == 0) {
		DOMAINSET_ADD(idn.domset.ds_hwlinked, domid);
	} else {
		/*
		 * Programming failed: undo the first-link claim and
		 * first-master assignment made above, then remove
		 * whatever was partially added.
		 */
		if (rem_pfn == idn.smr.locpfn)
			lock_clear(&idn.first_hwlink);

		if (idn.first_hwmasterid == (short)domid)
			idn.first_hwmasterid = (short)IDN_NIL_DOMID;

		(void) idnxf_shmem_sub(is_master, dp->dhw.dh_boardset);
	}

	IDN_GUNLOCK();

	return (rv);
}
7775 
/*
 * Undo the hardware programming done by idn_program_hardware for the
 * given remote domain (via idnxf_shmem_sub).  If the domain was the
 * first hardware master, that designation is cleared first.
 *
 * Returns 0 on success (domain removed from ds_hwlinked) or the
 * nonzero result of idnxf_shmem_sub.  A no-op returning 0 if the
 * domain was never hardware-linked.  Entered with the domain lock
 * held EXCL.
 */
static int
idn_deprogram_hardware(int domid)
{
	int		rv, is_master;
	idn_domain_t	*dp;
	procname_t	proc = "idn_deprogram_hardware";


	dp = &idn_domain[domid];

	ASSERT(domid != idn.localid);
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	/*
	 * Need to take into consideration what boards remote
	 * domain was connected to.  If we don't have a connection to
	 * them ourself, then we better remove them now , otherwise
	 * they'll never be removed (unless we link to them at some point).
	 */
#if 0
	DEBUG_USECDELAY(500000);
#endif /* 0 */

	IDN_GLOCK_EXCL();

	if (!DOMAIN_IN_SET(idn.domset.ds_hwlinked, domid)) {
		IDN_GUNLOCK();
		return (0);
	}

	PR_PROTO("%s:%d: DEprogram hw in domain %d w.r.t remote domain %d\n",
	    proc, domid, idn.localid, domid);

	/*
	 * It's possible to come through this flow for domains that
	 * have not been programmed, i.e. not in idn.hwlinked_domset,
	 * so don't bother asserting that they might be in there.
	 * This can occur if we lose a domain during the config/syn
	 * sequence.  If this occurs we won't know whether the remote
	 * domain has programmed its hardware or not.  If it has then
	 * it will have to go through the DMAP sequence and thus we
	 * have to go through it also.  So, if we reach at least the
	 * CONFIG state, we need to go through the DMAP handshake.
	 */

	PR_PROTO("%s:%d: SUB bset (0x%x)\n", proc, domid, dp->dhw.dh_boardset);

	if (idn.first_hwmasterid == (short)domid) {
		is_master = 1;
		idn.first_hwmasterid = (short)IDN_NIL_DOMID;
	} else {
		is_master = 0;
	}
	rv = idnxf_shmem_sub(is_master, dp->dhw.dh_boardset);

	if (rv == 0)
		DOMAINSET_DEL(idn.domset.ds_hwlinked, domid);

	IDN_GUNLOCK();

	return (rv);
}
7838 
7839 /*
7840  * Remember can't send slabs back to master at this point.
7841  * Entered with write-drwlock held.
7842  * Returns with drwlock dropped.
7843  */
/*
 * Tear down all configuration state for the given remote domain:
 * remove its cpus/boards from the global connected sets, free SMR
 * buffers and slab structures (ours if we're master, our local slab
 * bookkeeping if the remote was master), deprogram the hardware and
 * transition the domain to CLOSED.
 *
 * Caller must hold the SYNC lock and the domain lock EXCL; the
 * domain must be in DMAP state.  (Per the comment above: slabs can
 * no longer be sent back to the master at this point.)
 */
static void
idn_deconfig(int domid)
{
	idn_domain_t	*dp, *ldp;
	smr_slab_t	*sp;
	int		c, masterid;
	procname_t	proc = "idn_deconfig";

	ASSERT(IDN_SYNC_IS_LOCKED());
	ASSERT(IDN_DLOCK_IS_EXCL(domid));
	ASSERT(domid != idn.localid);

	ldp = &idn_domain[idn.localid];
	dp = &idn_domain[domid];

	ASSERT(dp->dstate == IDNDS_DMAP);

	PR_PROTO("%s:%d: (dio=%d, dioerr=%d, dnslabs=%d)\n",
	    proc, domid, dp->dio, dp->dioerr, dp->dnslabs);

	IDN_GLOCK_EXCL();
	masterid = IDN_GET_MASTERID();

	/* Remove the domain's boards and cpus from the global sets. */
	idn.dc_boardset &= ~dp->dhw.dh_boardset;
	for (c = 0; c < NCPU; c++) {
		if (CPU_IN_SET(dp->dcpuset, c)) {
			CPUSET_DEL(idn.dc_cpuset, c);
		}
	}

	IDN_GUNLOCK();

	(void) smr_buf_free_all(domid);

	if (idn.localid == masterid) {
		/*
		 * Since I'm the master there may
		 * have been slabs in this domain's
		 * idn_domain[] entry.
		 */
		DSLAB_LOCK_EXCL(domid);
		if ((sp = dp->dslab) != NULL) {
			PR_PROTO("%s:%d: freeing up %d dead slabs\n",
			    proc, domid, dp->dnslabs);
			smr_slab_free(domid, sp);
			dp->dslab = NULL;
			dp->dnslabs = 0;
			dp->dslab_state = DSLAB_STATE_UNKNOWN;
		}
		DSLAB_UNLOCK(domid);
	} else if (domid == masterid) {
		/*
		 * We're shutting down the master!
		 * We need to blow away our local slab
		 * data structures.
		 * Since I'm not the master, there should
		 * be no slab structures in the given
		 * domain's idn_domain[] entry.  They should
		 * only exist in the local domain's entry.
		 */
		DSLAB_LOCK_EXCL(idn.localid);
		ASSERT(dp->dslab == NULL);
#ifdef DEBUG
		{
			/* Report any slabs still busy at teardown. */
			int	nbusy = 0;
			uint_t	dommask = 0;
			for (sp = ldp->dslab; sp; sp = sp->sl_next) {
				smr_slabbuf_t *bp;

				if (!smr_slab_busy(sp))
					continue;
				nbusy++;
				for (bp = sp->sl_inuse; bp; bp = bp->sb_next)
					if (bp->sb_domid != IDN_NIL_DOMID)
						DOMAINSET_ADD(dommask,
						    bp->sb_domid);
			}
			if (nbusy)
				PR_PROTO("%s:%d: found %d busy slabs "
				    "(dommask = 0x%x)\n",
				    proc, domid, nbusy, dommask);
		}
#endif /* DEBUG */
		if ((sp = ldp->dslab) != NULL) {
			PR_PROTO("%s:%d: freeing up %d local slab "
			    "structs\n", proc, domid, ldp->dnslabs);
			smr_slab_garbage_collection(sp);
			ldp->dslab = NULL;
			ldp->dnslabs = 0;
			ldp->dslab_state = DSLAB_STATE_UNKNOWN;
		}
		DSLAB_UNLOCK(idn.localid);
	}
	if (dp->dio) {
		PR_PROTO("%s:%d: reset dio (%d) to 0\n", proc, domid, dp->dio);
		dp->dio = 0;
	}
	dp->dioerr = 0;

	PR_PROTO("%s:%d: reset diocheck (%x) to 0\n",
	    proc, domid, dp->diocheck);
	lock_clear(&dp->diocheck);

	CHECKPOINT_CLOSED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 2);

	/*
	 * Should have already flush our memory before
	 * reaching this stage.  The issue is that by the
	 * time we reach here the remote domains may have
	 * already reprogrammed their hardware and so flushing
	 * out caches now could result in a arbstop/hang
	 * if we have data that needs to go back to one
	 * of the remote domains that has already reprogrammed
	 * its hardware.
	 */
	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_flush, domid));

	(void) idn_deprogram_hardware(domid);
	/*
	 * XXX - what to do if we
	 *	 fail to program hardware
	 *	 probably should panic since
	 *	 demise of system may be near?
	 *	 Sufficient to just shutdown network?
	 */

	IDN_DSTATE_TRANSITION(dp, IDNDS_CLOSED);

	idn_close_domain(domid);
}
7974 
7975 /*
7976  * If we're sending a Reset we better make sure we don't have any
7977  * references or traffic headed in the direction of this guy, since
7978  * when he receives the reset, he'll start shutting down which means
7979  * we effectively have to shutdown _before_ sending the reset.
7980  * DO NOT HOLD ANY DOMAIN RWLOCKS ON ENTRY.  Could result in deadlock
7981  * due to channel server looping back through STREAMs and attempting
7982  * to acquire domain lock, i.e. channel server will never "stop".
7983  */
/*
 * Quiesce all data traffic toward the domains in domset: deactivate
 * channels (all of them if domset is DOMAINSET_ALL), flush the main
 * mailboxes to drop SMR references, and deactivate the per-domain
 * mailbox state.  When 'force' is set the domains are deactivated
 * from the channels *before* flushing, so the flush path does not
 * touch the SMR at all.
 *
 * Per the comment above: must be called with NO domain rwlocks held,
 * otherwise the channel server can deadlock looping back through
 * STREAMS.
 */
static void
idn_shutdown_datapath(domainset_t domset, int force)
{
	int		do_allchan;
	idn_domain_t	*dp;
	register int	d;
	procname_t	proc = "idn_shutdown_datapath";


	PR_CHAN("%s: domset = 0x%x\n", proc, (uint_t)domset);

	do_allchan = (domset == DOMAINSET_ALL) ? 1 : 0;

	/* Never shut down the path to ourselves. */
	DOMAINSET_DEL(domset, idn.localid);

	if (do_allchan) {
		/*
		 * Need to stop all outgoing and
		 * incoming SMR references.
		 */
		idn_deactivate_channel(CHANSET_ALL, IDNCHAN_OFFLINE);
	}

	/*
	 * If force is set then we don't want to reference
	 * the SMR at all, so deactivate the domains from
	 * channels first.  This will result in the mainmbox-flush
	 * routines to just clean up without referencing the
	 * SMR space.
	 */
	if (force)
		idn_mainmbox_deactivate(domset);

	/*
	 * Flush out mailboxes (clear smr reference).
	 */
	for (d = 0; d < MAX_DOMAINS; d++) {
		if (!DOMAIN_IN_SET(domset, d))
			continue;

		dp = &idn_domain[d];
		if ((dp->dmbox.m_send == NULL) && (dp->dmbox.m_recv == NULL))
			continue;

		IDN_MBOX_LOCK(d);
		if (dp->dmbox.m_send)
			(void) idn_mainmbox_flush(d, dp->dmbox.m_send);
		if (dp->dmbox.m_recv)
			(void) idn_mainmbox_flush(d, dp->dmbox.m_recv);
		IDN_MBOX_UNLOCK(d);
	}
	/*
	 * Deactivate all domain references also.
	 * Only necessary if it wasn't already done above.
	 */
	if (!force)
		idn_mainmbox_deactivate(domset);
}
8042 
/*
 * Send an IDN command (e.g. slab alloc/free/reap, nodename) with up
 * to three arguments to the given remote domain via cross-domain
 * call (IDNXDC).  A CMD message timer is started first so a missing
 * response is caught by the timeout path; the timer fills in the
 * message cookie.  Caller must hold the domain lock.
 */
void
idn_send_cmd(int domid, idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t
    arg3)
{
	idn_msgtype_t	mt;
	procname_t	proc = "idn_send_cmd";

	mt.mt_mtype = IDNP_CMD;
	mt.mt_atype = 0;
	mt.mt_cookie = 0;	/* filled in by IDN_MSGTIMER_START below */

	ASSERT(IDN_DLOCK_IS_HELD(domid));

	PR_PROTO("%s:%d: sending command %s\n", proc, domid,
	    VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown");

	IDN_MSGTIMER_START(domid, IDNP_CMD, (ushort_t)cmdtype,
	    idn_msg_waittime[IDNP_CMD], &mt.mt_cookie);

	IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, arg3);
}
8064 
/*
 * Send an ACK response (CMD|ACK) for a previously received command,
 * echoing the requester's message cookie.  cerrno carries the
 * command's completion status in the last argument slot.  Silently
 * does nothing if the target is the local domain, since we cannot
 * XDC to ourselves.  Caller must hold the domain lock.
 */
void
idn_send_cmdresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype, uint_t arg1,
    uint_t arg2, uint_t cerrno)
{
	idn_msgtype_t	mt;

	ASSERT(IDN_DLOCK_IS_HELD(domid));

	if (domid == idn.localid) {
		/*
		 * It's possible local domain received a command
		 * from itself.  However, we cannot send a normal
		 * "ack" response (XDC) to ourself.
		 */
		return;
	}

	mt.mt_mtype = IDNP_CMD | IDNP_ACK;
	mt.mt_atype = 0;
	mt.mt_cookie = mtp->mt_cookie;	/* echo requester's cookie */

	IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, cerrno);
}
8088 
/*
 * Send a NACK response (CMD|NACK) for a received command that could
 * not be honored, with nacktype indicating the reason (e.g.
 * IDNNACK_NOCONN, IDNNACK_BADCMD).  No-op for the local domain,
 * since we cannot XDC to ourselves.
 */
static void
idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
    idn_nack_t nacktype)
{
	idn_msgtype_t	mt;

	if (domid == idn.localid)
		return;

	mt.mt_mtype = IDNP_CMD | IDNP_NACK;
	mt.mt_atype = 0;
	mt.mt_cookie = mtp->mt_cookie;	/* echo requester's cookie */

	(void) IDNXDC(domid, &mt, (uint_t)cmdtype, (uint_t)nacktype, 0, 0);
}
8104 
/*
 * Broadcast an IDN command to every connected remote domain and then
 * process it locally as well (via idn_local_cmd), since a domain
 * cannot XDC to itself.  Unlike idn_send_cmd, no response timers are
 * started for broadcasts.
 */
void
idn_broadcast_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
{
	idn_msgtype_t	mt;
	domainset_t	domset;
	procname_t	proc = "idn_broadcast_cmd";

	IDN_GLOCK_SHARED();

	domset = idn.domset.ds_connected;
	DOMAINSET_DEL(domset, idn.localid);

	PR_PROTO("%s: broadcasting command (%s) to domainset 0x%x\n",
	    proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
	    domset);

	mt.mt_mtype = IDNP_CMD;
	mt.mt_atype = 0;
	mt.mt_cookie = 0;

	IDNXDC_BROADCAST(domset, &mt, (uint_t)cmdtype, arg1, arg2, arg3);

	IDN_GUNLOCK();
	/*
	 * This is a broadcast which means local domain needs
	 * to process it also.  Since we can't XDC to ourselves
	 * we simply call a local function.
	 */
	idn_local_cmd(cmdtype, arg1, arg2, arg3);
}
8135 
8136 /*
8137  * Since xargs[0] contains the cmdtype, only xargs[1], xargs[2], xargs[3]
8138  * are valid possible response arguments.
8139  */
/*
 * Dispatch a received CMD message.  If the message carries an
 * ACK/NACK it is a response to a command we issued earlier and is
 * routed to the matching *_resp handler; otherwise it is a fresh
 * request from the remote domain and is routed to the matching
 * *_req handler (unsupported requests are NACKed with
 * IDNNACK_BADCMD).  Commands are refused outright with
 * IDNNACK_NOCONN when there is no valid master or the domain is not
 * CONNECTED.  Entered with the domain lock held EXCL.
 */
static void
idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	uint_t			msg = mtp->mt_mtype;
	register idn_domain_t	*dp;
	idn_cmd_t		cmdtype;
	uint_t			acknack;
	uint_t			cmdarg1, cmdarg2, cmdarg3;
	int			islocal;
	int			unsup_cmd_sent, unsup_cmd_recvd;
	procname_t		proc = "idn_recv_cmd";

	acknack = msg & IDNP_ACKNACK_MASK;
	GET_XARGS(xargs, &cmdtype, &cmdarg1, &cmdarg2, &cmdarg3);

	dp = &idn_domain[domid];
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	IDN_GLOCK_SHARED();

	islocal = (domid == idn.localid);

	ASSERT(!acknack || (acknack & IDNP_ACKNACK_MASK));

	PR_PROTO("%s:%d: (local=%d) acknack=0x%x, cmdtype=%s(%d), "
	    "a1=0x%x, a2=0x%x, a3=0x%x\n",
	    proc, domid, islocal, acknack,
	    VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
	    cmdtype, cmdarg1, cmdarg2, cmdarg3);

	unsup_cmd_sent = unsup_cmd_recvd = 0;

	if ((IDN_GET_MASTERID() == IDN_NIL_DOMID) ||
	    (dp->dstate != IDNDS_CONNECTED)) {
		/*
		 * Commands cannot be handled without a valid
		 * master.  If this is a request then nack him.
		 */
		PR_PROTO("%s:%d: cannot process CMD w/o master (%d, %s)\n",
		    proc, domid, IDN_GET_MASTERID(),
		    idnds_str[dp->dstate]);

		if (!islocal && !(acknack & IDNP_ACKNACK_MASK))
			idn_send_cmd_nackresp(domid, mtp, cmdtype,
			    IDNNACK_NOCONN);
		IDN_GUNLOCK();
		return;
	}
	IDN_GUNLOCK();

	if (acknack & IDNP_ACKNACK_MASK) {
		idn_nack_t	nack;
		/*
		 * Receiving a cmd+ack or cmd+nack in response to some
		 * earlier command we must have issued.
		 * If the response is a nack, there are two possibilites:
		 *
		 *	1. Remote domain failed to allocate due
		 *	   to limited resources.
		 *
		 *	2. Remote domain does not support this
		 *	   particular command.
		 *
		 * In the case of #2, the argument immediately after
		 * the cmdtype (xargs[1]) will be (-1).
		 */
		nack = (idn_nack_t)cmdarg1;
		if ((acknack & IDNP_NACK) && (nack == IDNNACK_BADCMD))
			unsup_cmd_sent++;

		if (islocal) {
			/*
			 * Shouldn't be receiving local commands w/acks.
			 * Force the switch below to the unsupported path.
			 */
			cmdtype = (idn_cmd_t)0;
		}

		switch (cmdtype) {
		case IDNCMD_SLABALLOC:
			idn_recv_slaballoc_resp(domid, cmdarg1, cmdarg2,
			    cmdarg3);
			break;

		case IDNCMD_SLABFREE:
			idn_recv_slabfree_resp(domid, cmdarg1, cmdarg2,
			    cmdarg3);
			break;

		case IDNCMD_SLABREAP:
			/*
			 * We only care if successful.
			 */
			if (acknack & IDNP_ACK)
				idn_recv_slabreap_resp(domid, cmdarg1, cmdarg3);
			break;

		case IDNCMD_NODENAME:
			if ((acknack & IDNP_NACK) == 0) {
				idn_recv_nodename_resp(domid, cmdarg1, cmdarg3);
				break;
			}
			switch (nack) {
			case IDNNACK_NOCONN:
			case IDNNACK_RETRY:
				/*
				 * Remote domain was not quite
				 * ready, try again.
				 */
				PR_PROTO("%s:%d: remote not ready "
				    "for %s - retrying "
				    "[dstate=%s]\n",
				    proc, domid,
				    idncmd_str[IDNCMD_NODENAME],
				    idnds_str[dp->dstate]);

				if (dp->dstate == IDNDS_CONNECTED)
					(void) timeout(idn_retry_nodename_req,
					    (void *)(uintptr_t)domid, hz);
				/* FALLTHROUGH */
			default:
				break;
			}
			break;

		default:
			/*
			 * Unsupported command.
			 */
			unsup_cmd_recvd++;
			break;
		}
		if (unsup_cmd_sent) {
			PR_PROTO("%s:%d: unsupported command "
			    "requested (0x%x)\n",
			    proc, domid, cmdtype);
		}
		if (unsup_cmd_recvd) {
			PR_PROTO("%s:%d: unsupported command "
			    "response (0x%x)\n",
			    proc, domid, cmdtype);
		}
	} else {
		/*
		 * Receiving a regular cmd from a remote domain.
		 */
		switch (cmdtype) {
		case IDNCMD_SLABALLOC:
			idn_recv_slaballoc_req(domid, mtp, cmdarg1);
			break;

		case IDNCMD_SLABFREE:
			idn_recv_slabfree_req(domid, mtp, cmdarg1, cmdarg2);
			break;

		case IDNCMD_SLABREAP:
			idn_recv_slabreap_req(domid, mtp, cmdarg1);
			break;

		case IDNCMD_NODENAME:
			idn_recv_nodename_req(domid, mtp, cmdarg1);
			break;

		default:
			/*
			 * Unsupported command.
			 */
			unsup_cmd_recvd++;
			break;
		}
		if (!islocal && unsup_cmd_recvd) {
			/*
			 * Received an unsupported IDN command.
			 */
			idn_send_cmd_nackresp(domid, mtp, cmdtype,
			    IDNNACK_BADCMD);
		}
	}
}
8317 
8318 /*
8319  * This is a supporting routine for idn_broadcast_cmd() to
8320  * handle processing of the requested command for the local
8321  * domain.  Currently the only support broadcast command
8322  * supported is reaping.
8323  */
8324 /*ARGSUSED2*/
/*
 * Supporting routine for idn_broadcast_cmd: process the broadcast
 * command for the local domain by queueing a protocol job to
 * ourselves (we cannot XDC to ourselves).  The job is submitted to
 * the protocol server via idn_protojob_submit.
 */
static void
idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
{
	idn_protojob_t	*jp;
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	procname_t	proc = "idn_local_cmd";

	PR_PROTO("%s: submitting local command %s on domain %d\n",
	    proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
	    idn.localid);


	jp = idn_protojob_alloc(KM_SLEEP);

	jp->j_msg.m_domid    = ldp->domid;
	jp->j_msg.m_msgtype  = IDNP_CMD;
	jp->j_msg.m_cookie   = ldp->dcookie_recv;
	SET_XARGS(jp->j_msg.m_xargs, cmdtype, arg1, arg2, arg3);

	idn_protojob_submit(ldp->domid, jp);
}
8346 
8347 /*
8348  * Terminate any outstanding commands that may have
8349  * been targeted for the given domain.  A command is
8350  * designated as outstanding if it has an active timer.
8351  *
8352  * serrno = ECANCELED.
8353  */
static void
idn_terminate_cmd(int domid, int serrno)
{
	idn_domain_t	*dp;
	idn_timer_t	*tplist = NULL, *tp;
	procname_t	proc = "idn_terminate_cmd";

	dp = &idn_domain[domid];

	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	/*
	 * Dequeue every outstanding CMD timer for this domain
	 * in one shot (cookie 0 matches all of them).
	 */
	IDN_MSGTIMER_GET(dp, IDNP_CMD, tplist, 0);
	/*
	 * At this point the timers are effectively terminated
	 * since their t_onq indication is set false.
	 */
	if (tplist == NULL) {
		PR_PROTO("%s:%d: no outstanding cmds found\n",
		    proc, domid);
		/*
		 * There is a window where we may have caught a
		 * request just prior to issuing the actual
		 * command (SLABALLOC).  We're guaranteed if there
		 * was, then he will have at least registered.
		 * So, if we abort the command now, he'll catch
		 * it before going to sleep.
		 * Drop through.
		 */
	}
	/* The dequeued list is expected to be NULL-terminated. */
	ASSERT(tplist ? (tplist->t_back->t_forw == NULL) : 1);

	for (tp = tplist; tp; tp = tp->t_forw) {
		ASSERT(tp->t_type == IDNP_CMD);

		PR_PROTO("%s:%d: found outstanding cmd: %s\n",
		    proc, domid, idncmd_str[tp->t_subtype]);

		switch (tp->t_subtype) {
		case IDNCMD_SLABALLOC:
			/*
			 * Outstanding slaballoc request may have
			 * slab waiters hanging around.  Need to
			 * tell them to bail out.  The given domain
			 * must be the master if we have an outstanding
			 * command to him.  This also presumes that
			 * if there are any waiters they're only in
			 * the local domain's waiting area (i.e. we're
			 * a slave).
			 */
#ifdef DEBUG
			IDN_GLOCK_SHARED();
			ASSERT(domid == IDN_GET_MASTERID());
			ASSERT(idn.localid != IDN_GET_MASTERID());
			IDN_GUNLOCK();
#endif /* DEBUG */
			(void) smr_slabwaiter_abort(idn.localid, serrno);
			break;

		case IDNCMD_SLABFREE:
		case IDNCMD_SLABREAP:
		case IDNCMD_NODENAME:
			/*
			 * Nothing really waiting for these operations
			 * so no biggy if we just drop.
			 * Note that NODENAME may have an outstanding
			 * buffer, however that will be reclaimed
			 * when we actually unlink from domain.
			 */
			break;

		default:
			/* Unknown CMD subtype - should never happen. */
			ASSERT(0);
			break;
		}
	}
	/*
	 * As mentioned before the timers are effectively no-op'd
	 * once they're dequeued, however let's cleanup house and
	 * get rid of the useless entries in the timeout queue.
	 */
	if (tplist) {
		IDN_TIMER_STOPALL(tplist);
	}

	if (idn_domain[idn.localid].dvote.v.master) {
		/*
		 * I'm the master so it's possible I had
		 * outstanding commands (SLABALLOC) waiting
		 * to be satisfied for the given domain.
		 * Since we're forcing an error it's okay
		 * to continue holding onto the drwlock.
		 */
		PR_PROTO("%s:%d: abort slaballoc waiters\n", proc, domid);
		(void) smr_slabwaiter_abort(domid, serrno);

	} else if (dp->dvote.v.master) {
		/*
		 * Remote domain is the master; abort any local
		 * waiters expecting a response from him.
		 */
		PR_PROTO("%s:%d: abort (local domain) slaballoc waiters\n",
		    proc, domid);
		(void) smr_slabwaiter_abort(idn.localid, serrno);
	}
}
8455 
/*
 * Transmit an ACK or NACK (per mtp->mt_mtype) for a previously
 * received message to the given domain via cross-domain call.
 * xargs carries the four response arguments; for a NACK the nack
 * type is extracted from xargs via GET_XARGS_NACK_TYPE (DEBUG
 * tracing only).
 */
static void
idn_send_acknack(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
{
	idn_domain_t	*dp = &idn_domain[domid];
	procname_t	proc = "idn_send_acknack";

	ASSERT(mtp ? (mtp->mt_mtype & IDNP_ACKNACK_MASK) : 1);
	ASSERT(domid != IDN_NIL_DOMID);

#ifdef DEBUG
	{
		STRING(mstr);
		STRING(astr);

		INUM2STR(mtp->mt_mtype, mstr);
		INUM2STR(mtp->mt_atype, astr);

		if (mtp->mt_mtype & IDNP_ACK) {
			PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
			    "a1=0x%x, a2=0x%x, a3=0x%x, a4 = 0x%x\n",
			    proc, domid, idnds_str[dp->dstate],
			    astr, mstr, xargs[0], xargs[1],
			    xargs[2], xargs[3]);
		} else {
			idn_nack_t	nack;

			nack = GET_XARGS_NACK_TYPE(xargs);
			PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
			    "nack=%s(0x%x)\n",
			    proc, domid, idnds_str[dp->dstate],
			    astr, mstr, idnnack_str[nack],
			    (uint_t)nack);
		}
	}
#endif /* DEBUG */

	(void) IDNXDC(domid, mtp, xargs[0], xargs[1], xargs[2], xargs[3]);
}
8494 
/*
 * Pre-allocate up to nslabs slabs for the local domain from the
 * master's SMR pool.  Quietly bails out if the local domain is
 * not ONLINE or already holds at least one slab.  Acquires and
 * releases GLOCK and the local DSLAB lock internally.
 */
/*ARGSUSED0*/
static void
idn_prealloc_slab(int nslabs)
{
	register int	s, serrno;
	smr_slab_t	*sp;
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	procname_t	proc = "idn_prealloc_slab";

	IDN_GLOCK_SHARED();
	DSLAB_LOCK_SHARED(idn.localid);
	if ((idn.state != IDNGS_ONLINE) || (ldp->dnslabs > 0)) {
		/*
		 * Not in the proper state or slab already allocated.
		 */
		DSLAB_UNLOCK(idn.localid);
		IDN_GUNLOCK();
		return;
	}
	IDN_GUNLOCK();
	ASSERT(!ldp->dslab);

	serrno = 0;
	for (s = 0; (s < nslabs) && ((int)ldp->dnslabs < nslabs); s++) {
		/*
		 * Returns with ldp->drwlock dropped.
		 */
		serrno = smr_slab_alloc(idn.localid, &sp);
		if (serrno != 0) {
			PR_PROTO("%s: FAILED to pre-alloc'd "
			    "slab (serrno = %d)\n", proc, serrno);
			break;
		}
		/*
		 * State may have changed since smr_slab_alloc
		 * temporarily drops drwlock.  Make sure we're
		 * still connected.
		 */
		PR_PROTO("%s: SUCCESSFULLY pre-alloc'd slab\n", proc);

		if (idn.state != IDNGS_ONLINE) {
			/* NOTE(review): idn.state read without GLOCK here. */
			PR_PROTO("%s: Lost connection..leaving\n", proc);
			break;
		}
	}

	DSLAB_UNLOCK(idn.localid);
}
8543 
8544 /*
8545  * Received a request from a remote domain to
8546  * allocate a slab from the master SMR for him.
8547  * Allocate slab and return the response.
8548  */
static void
idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp, uint_t slab_size)
{
	register idn_domain_t	*dp;
	procname_t		proc = "idn_recv_slaballoc_req";

	PR_PROTO("%s: slaballoc req from domain %d (size=0x%x)\n",
	    proc, domid, slab_size);

	dp = &idn_domain[domid];

	/*
	 * Entered holding the domain lock exclusively.  On the
	 * success path below the lock is dropped across the slab
	 * allocation and re-acquired SHARED before returning.
	 */
	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	IDN_GLOCK_SHARED();

	if (idn.localid != IDN_GET_MASTERID()) {
		IDN_GUNLOCK();
		/*
		 * It's a fatal error if the remote domain thinks
		 * we're the master.
		 */
		idn_send_slaballoc_resp(domid, mtp, 0, 0, EACCES);

	} else if (dp->dstate != IDNDS_CONNECTED) {

		IDN_GUNLOCK();
		/*
		 * It's a fatal error if we don't yet have a
		 * connection established with the requestor.
		 */
		idn_send_slaballoc_resp(domid, mtp, 0, 0, ENOLINK);
	} else {
		int		serrno;
		smr_slab_t	*sp;
		smr_offset_t	slab_offset;

		IDN_GUNLOCK();
		DSLAB_LOCK_SHARED(domid);
		IDN_DUNLOCK(domid);
		/*
		 * We're connected and we're the master.
		 * smr_slab_alloc() returns with dp->drwlock dropped.
		 */
		if ((serrno = smr_slab_alloc(domid, &sp)) == 0) {
			/*
			 * Successfully allocated slab for remote slave.
			 */
			slab_offset = IDN_ADDR2OFFSET(sp->sl_start);
			slab_size   = sp->sl_end - sp->sl_start;
			ASSERT((slab_offset != 0) && (slab_size != 0));
		} else {
			slab_offset = slab_size = 0;
		}
		DSLAB_UNLOCK(domid);
		/*
		 * The drwlock is dropped during smr_slab_alloc.
		 * During that time our connection with the given
		 * domain may have changed.  Better check again.
		 */
		IDN_DLOCK_SHARED(domid);
		if ((dp->dstate != IDNDS_CONNECTED) && !serrno) {
			/*
			 * Connection broke.  Keep the slab here.
			 */
			DSLAB_LOCK_EXCL(domid);
			IDN_DUNLOCK(domid);
			smr_slab_free(domid, sp);
			DSLAB_UNLOCK(domid);
			slab_offset = slab_size = 0;
			serrno = ECANCELED;
			IDN_DLOCK_SHARED(domid);
		}
		/*
		 * Send response.
		 * Note that smr_slab_alloc automatically installs
		 * slab into domains respective idn_domain entry
		 * to be associated with that domain.
		 */
		idn_send_slaballoc_resp(domid, mtp, slab_offset, slab_size,
		    serrno);
	}
}
8631 
8632 static void
8633 idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp, smr_offset_t slab_offset,
8634     uint_t slab_size, int serrno)
8635 {
8636 	procname_t	proc = "idn_send_slaballoc_resp";
8637 
8638 	PR_PROTO("%s: slaballoc resp to domain %d (off=0x%x, size=0x%x) "
8639 	    "[serrno = %d]\n",
8640 	    proc, domid, slab_offset, slab_size, serrno);
8641 
8642 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABALLOC, slab_offset, slab_size,
8643 	    serrno);
8644 }
8645 
8646 /*
8647  * Received the ack or nack to a previous allocation request
8648  * made by the local domain to the master for a slab.  Need
8649  * to "put" the response into the waiting area for any
8650  * waiters.
8651  */
static void
idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset, uint_t slab_size,
    int serrno)
{
	smr_slab_t		*sp = NULL;
	int			rv;
	procname_t		proc = "idn_recv_slaballoc_resp";


	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	PR_PROTO("%s: slaballoc resp from domain %d (off=0x%x, size=0x%x) "
	    "[serrno = %d]\n",
	    proc, domid, slab_offset, slab_size, serrno);

	if (!serrno) {
		IDN_GLOCK_SHARED();
		if (domid != IDN_GET_MASTERID()) {
			/*
			 * We should only be receiving responses from
			 * our master.  This is either a bogus message
			 * or an old response.  In either case dump it.
			 */
			PR_PROTO("%s: BOGUS slaballoc resp from domid %d "
			    "(master = %d)\n",
			    proc, domid, IDN_GET_MASTERID());
			serrno = EPROTO;
		}
		IDN_GUNLOCK();

		if (!serrno &&
		    !VALID_NWROFFSET(slab_offset, IDN_SMR_BUFSIZE)) {
			PR_PROTO("%s: slab offset (0x%x) out of range "
			    "(0-0x%lx)\n",
			    proc, slab_offset, MB2B(IDN_NWR_SIZE));
			serrno = EPROTO;
		} else if (!serrno) {
			/*
			 * Good response - wrap the received slab range
			 * in a local smr_slab_t and build its SMR
			 * buffer free-list.
			 */
			sp = GETSTRUCT(smr_slab_t, 1);
			sp->sl_start = IDN_OFFSET2ADDR(slab_offset);
			sp->sl_end   = sp->sl_start + slab_size;
			smr_alloc_buflist(sp);
		}
	}

	/*
	 * Always "put" slabs back to yourself since you're a slave.
	 * Note that we set the forceflag so that even if there are
	 * no waiters we still install the slab for the domain.
	 */
	if (!serrno) {
		DSLAB_LOCK_EXCL(idn.localid);
	}
	/* On the error paths sp is still NULL; the put only wakes waiters. */
	rv = smr_slaballoc_put(idn.localid, sp, 1, serrno);
	if (!serrno) {
		DSLAB_UNLOCK(idn.localid);
	}

	if (rv < 0) {
		/*
		 * Some kind of error trying to install response.
		 * If there was a valid slab sent to us, we'll
		 * just have to send it back.
		 */
		PR_PROTO("%s: failed to install response in waiting area\n",
		    proc);
		if (slab_size != 0) {
			PR_PROTO("%s: sending slab back to domain %d "
			    "(master = %d)\n",
			    proc, domid, IDN_GET_MASTERID());
			idn_send_cmd(domid, IDNCMD_SLABFREE, slab_offset,
			    slab_size, 0);
		}
		if (sp) {
			smr_free_buflist(sp);
			FREESTRUCT(sp, smr_slab_t, 1);
		}
	}
}
8730 
8731 /*
8732  * Note that slab reaping is effectively performed asynchronously
8733  * since the request will be received a protocol server.
8734  */
static void
idn_recv_slabreap_req(int domid, idn_msgtype_t *mtp, int nslabs)
{
	procname_t	proc = "idn_recv_slabreap_req";

	PR_PROTO("%s: slab reap request (nslabs = %d)\n", proc, nslabs);

	ASSERT(IDN_DLOCK_IS_EXCL(domid));

	IDN_GLOCK_SHARED();
	if (domid != IDN_GET_MASTERID()) {
		/*
		 * Only the master can request that slabs be reaped.
		 */
		IDN_GUNLOCK();
		PR_PROTO("%s: only master can request slab reaping\n", proc);

		idn_send_cmdresp(domid, mtp, IDNCMD_SLABREAP, 0, 0, EACCES);

		return;
	}
	IDN_GUNLOCK();

	if (nslabs != 0) {
		/*
		 * Domain lock is dropped across the reap and
		 * re-acquired SHARED afterwards; nslabs is updated
		 * in place by smr_slab_reap().
		 * NOTE(review): entered EXCL but left SHARED --
		 * confirm callers expect the downgrade.
		 */
		IDN_DUNLOCK(domid);
		smr_slab_reap(idn.localid, &nslabs);
		IDN_DLOCK_SHARED(domid);
	}

	PR_PROTO("%s: slab reap result (nslabs = %d)\n", proc, nslabs);

	/*
	 * Go ahead and send the reap response back before we start
	 * free'ing off the individual slabs.
	 */
	idn_send_slabreap_resp(domid, mtp, nslabs, 0);
}
8772 
8773 static void
8774 idn_recv_slabreap_resp(int domid, int nslabs, int serrno)
8775 {
8776 	procname_t	proc = "idn_recv_slabreap_resp";
8777 
8778 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8779 
8780 	if ((idn.localid != IDN_GET_MASTERID()) || (idn.localid == domid)) {
8781 		PR_PROTO("%s: unexpected slabreap resp received "
8782 		    "(domid = %d)\n", proc, domid);
8783 		ASSERT(0);
8784 		return;
8785 	}
8786 	PR_PROTO("%s: recvd reap response from domain %d for %d slabs "
8787 	    "[serrno = %d]\n", proc, domid, nslabs, serrno);
8788 }
8789 
8790 /*
8791  * Not really necessary to send slabreap response.
8792  * XXX - perhaps useful to master for accounting or
8793  *	 throttling of further reaping?
8794  */
8795 static void
8796 idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp, int nslabs, int serrno)
8797 {
8798 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABREAP, nslabs, 0, serrno);
8799 }
8800 
8801 /*
8802  * Slave -> Master ONLY
8803  * Master never sends slabfree request to itself.
8804  */
8805 static void
8806 idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp, smr_offset_t slab_offset,
8807     uint_t slab_size)
8808 {
8809 	smr_slab_t	*sp;
8810 	int		serrno;
8811 	caddr_t		s_start, s_end;
8812 	procname_t	proc = "idn_recv_slabfree_req";
8813 
8814 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8815 
8816 	if (domid == IDN_GET_MASTERID()) {
8817 		PR_PROTO("%s: unexpected slabfree req received (domid = %d)\n",
8818 		    proc, domid);
8819 		idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size,
8820 		    EACCES);
8821 		return;
8822 	}
8823 	if (slab_size > IDN_SLAB_SIZE) {
8824 		PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
8825 		    proc, IDN_SLAB_SIZE, slab_size);
8826 		idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size,
8827 		    EINVAL);
8828 		return;
8829 	}
8830 	s_start = IDN_OFFSET2ADDR(slab_offset);
8831 	s_end   = s_start + slab_size;
8832 	/*
8833 	 * Master has received a SLABFREE request (effectively a response
8834 	 * to some earlier SLABREAP request.
8835 	 * Find the slab associated with this slab and free it up.
8836 	 */
8837 	DSLAB_LOCK_EXCL(domid);
8838 	if ((sp = smr_slaballoc_get(domid, s_start, s_end)) != NULL) {
8839 		smr_slab_free(domid, sp);
8840 		serrno = 0;
8841 	} else {
8842 		serrno = EINVAL;
8843 	}
8844 	DSLAB_UNLOCK(domid);
8845 
8846 	idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size, serrno);
8847 }
8848 
8849 /*
8850  * Master -> Slave ONLY
8851  */
8852 static void
8853 idn_recv_slabfree_resp(int domid, uint_t slab_offset, uint_t slab_size, int
8854     serrno)
8855 {
8856 	procname_t	proc = "idn_recv_slabfree_resp";
8857 
8858 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8859 
8860 	if (domid != IDN_GET_MASTERID()) {
8861 		PR_PROTO("%s: unexpected slabfree resp received (domid = %d)\n",
8862 		    proc, domid);
8863 		ASSERT(0);
8864 		return;
8865 	}
8866 	if (slab_size > IDN_SLAB_SIZE) {
8867 		PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
8868 		    proc, IDN_SLAB_SIZE, slab_size);
8869 		ASSERT(0);
8870 		return;
8871 	}
8872 	PR_PROTO("%s: recvd free resp from dom %d "
8873 	    "- slab (off/size) 0x%x/0x%x [serrno = %d]\n",
8874 	    proc, domid, slab_offset, slab_size, serrno);
8875 }
8876 
8877 static void
8878 idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp, uint_t slab_offset,
8879     uint_t slab_size, int serrno)
8880 {
8881 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABFREE, slab_offset, slab_size,
8882 	    serrno);
8883 }
8884 
8885 static void
8886 idn_retry_nodename_req(void *arg)
8887 {
8888 	int	domid = (int)(uintptr_t)arg;
8889 
8890 	idn_send_nodename_req(domid);
8891 }
8892 
/*
 * Ask the given domain to fill an SMR buffer with its nodename.
 * Called without the domain lock held; acquires it SHARED here.
 * If the buffer allocation fails while still connected, reschedules
 * itself via timeout() one second later.
 */
static void
idn_send_nodename_req(int domid)
{
	caddr_t		b_bufp;
	smr_offset_t	bufoffset;
	int		serrno;
	idn_domain_t	*dp = &idn_domain[domid];
	procname_t	proc = "idn_send_nodename_req";

	/*
	 * Need to drop domain lock across
	 * SMR allocation.
	 */
	serrno = smr_buf_alloc(domid, MAXDNAME+1, &b_bufp);

	IDN_DLOCK_SHARED(domid);
	if (dp->dstate != IDNDS_CONNECTED) {
		/*
		 * Lost connection.  Release the buffer (if we got
		 * one) rather than leak it.
		 */
		PR_PROTO("%s:%d: connection lost [dstate = %s]\n",
		    proc, domid, idnds_str[dp->dstate]);
		IDN_DUNLOCK(domid);
		if (!serrno)
			(void) smr_buf_free(domid, b_bufp, MAXDNAME+1);
		return;
	}
	if (serrno) {
		/*
		 * Failed to allocate buffer, but still have
		 * connection so keep trying.  We may have queried
		 * the master a little too earlier.
		 */
		PR_PROTO("%s:%d: buffer alloc failed [dstate = %s]\n",
		    proc, domid, idnds_str[dp->dstate]);
		(void) timeout(idn_retry_nodename_req, (void *)(uintptr_t)domid,
		    hz);
		IDN_DUNLOCK(domid);
		return;
	}

	/*
	 * Protocol: first byte of the buffer carries the maximum
	 * name length the responder may copy into it.
	 */
	*b_bufp = (char)MAXDNAME;
	bufoffset = IDN_ADDR2OFFSET(b_bufp);

	idn_send_cmd(domid, IDNCMD_NODENAME, bufoffset, 0, 0);
	IDN_DUNLOCK(domid);
}
8940 
8941 static void
8942 idn_send_nodename_resp(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset,
8943     int serrno)
8944 {
8945 	idn_send_cmdresp(domid, mtp, IDNCMD_NODENAME, (uint_t)bufoffset, 0,
8946 	    serrno);
8947 }
8948 
/*
 * Fill the requestor's SMR buffer with the local domain's nodename.
 * Buffer protocol: first byte holds the capacity the requestor can
 * accept; the name is copied in starting at the second byte.
 */
static void
idn_recv_nodename_req(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset)
{
	caddr_t		b_bufp;
	int		length;
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	procname_t	proc = "idn_recv_nodename_req";

	IDN_DLOCK_EXCL(idn.localid);
	if (!strlen(ldp->dname)) {
		if (!strlen(utsname.nodename)) {
			/*
			 * Local domain's nodename hasn't been
			 * set yet.  Ask the requestor to retry later.
			 */
			IDN_DUNLOCK(idn.localid);
			idn_send_cmd_nackresp(domid, mtp, IDNCMD_NODENAME,
			    IDNNACK_RETRY);
			return;
		}
		/*
		 * NOTE(review): strncpy() does not guarantee NUL
		 * termination; this relies on dname's last byte
		 * already being zero -- confirm dname is zeroed at
		 * domain-structure init.
		 */
		strncpy(ldp->dname, utsname.nodename, MAXDNAME - 1);
	}
	IDN_DLOCK_DOWNGRADE(idn.localid);

	if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
		PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
		    proc, domid, bufoffset);
		IDN_DUNLOCK(idn.localid);
		idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
		return;
	}

	/* First byte = capacity advertised by the requestor. */
	b_bufp = IDN_OFFSET2ADDR(bufoffset);
	length = (int)(*b_bufp++ & 0xff);

	if (length < strlen(ldp->dname)) {
		PR_PROTO("%s:%d: buffer not big enough (req %lu, got %d)\n",
		    proc, domid, strlen(ldp->dname), length);
		IDN_DUNLOCK(idn.localid);
		idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
		return;
	}

	/* Copy the name in and force NUL termination. */
	strncpy(b_bufp, ldp->dname, MAXDNAME);
	b_bufp[MAXDNAME-1] = 0;
	IDN_DUNLOCK(idn.localid);

	idn_send_nodename_resp(domid, mtp, bufoffset, 0);
}
8998 
8999 static void
9000 idn_recv_nodename_resp(int domid, smr_offset_t bufoffset, int serrno)
9001 {
9002 	caddr_t		b_bufp;
9003 	idn_domain_t	*dp = &idn_domain[domid];
9004 	procname_t	proc = "idn_recv_nodename_resp";
9005 
9006 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
9007 
9008 	if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
9009 		PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
9010 		    proc, domid, bufoffset);
9011 		return;
9012 	}
9013 
9014 	if (serrno == 0) {
9015 		b_bufp = IDN_OFFSET2ADDR(bufoffset) + 1;
9016 		b_bufp[MAXDNAME-1] = 0;
9017 
9018 		if (strlen(b_bufp) > 0) {
9019 			strncpy(dp->dname, b_bufp, MAXDNAME);
9020 			PR_PROTO("%s:%d: received nodename(%s)\n",
9021 			    proc, domid, dp->dname);
9022 		}
9023 	}
9024 
9025 	(void) smr_buf_free(domid, b_bufp - 1, MAXDNAME + 1);
9026 }
9027 
9028 /*
9029  * The master allocations the SMR management structures.
9030  */
/*
 * Returns 0 on success (or if already initialized), -1 if the
 * SMR slab pool could not be initialized.
 */
static int
idn_master_init()
{
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	size_t		reserved_size = 0;
	caddr_t		reserved_area = NULL;
	procname_t	proc = "idn_master_init";

	ASSERT(IDN_GLOCK_IS_EXCL());
	ASSERT(IDN_DLOCK_IS_EXCL(idn.localid));

	/* idn.mboxarea non-NULL is the "already initialized" marker. */
	if (idn.mboxarea != NULL) {
		PR_PROTO("%s: master data already initialized\n", proc);
		return (0);
	}

	PR_PROTO("%s: initializing master data (domid = %d)\n",
	    proc, idn.localid);

	/*
	 * Reserve an area of the SMR for mailbox usage.
	 * This area is allocated to other domains via
	 * the master.  Round it up to IDN_SMR_BUFSIZE multiple.
	 */
	reserved_size = IDNROUNDUP(IDN_MBOXAREA_SIZE, IDN_SMR_BUFSIZE);

	PR_PROTO("%s: reserving %lu bytes for mailbox area\n",
	    proc, reserved_size);

#ifdef DEBUG
	if (reserved_size > (size_t)IDN_SLAB_SIZE) {
		PR_PROTO("%s: WARNING mbox area (%ld) > slab size (%d)\n",
		    proc, reserved_size, IDN_SLAB_SIZE);
	}
#endif /* DEBUG */
	/*
	 * Initialize the pool of slabs and SMR I/O buffers.
	 */
	if (smr_slabpool_init(reserved_size, &reserved_area) != 0) {
		/* Tear down any partial initialization. */
		idn_master_deinit();
		return (-1);
	}

	ASSERT(idn.mboxarea == NULL);
	ASSERT(reserved_area);

	bzero(reserved_area, reserved_size);

	idn.mboxarea = (idn_mboxtbl_t *)reserved_area;
	ldp->dmbox.m_tbl = IDN_MBOXAREA_BASE(idn.mboxarea, idn.localid);
	/*
	 * Initialize the SMR pointers in the entire
	 * mailbox table.
	 */
	idn_mboxarea_init(idn.mboxarea, IDN_MBOXAREA_SIZE / IDN_MBOXTBL_SIZE);

	return (0);
}
9089 
9090 static void
9091 idn_master_deinit()
9092 {
9093 	idn_domain_t	*ldp;
9094 	smr_slab_t	*sp;
9095 	procname_t	proc = "idn_master_deinit";
9096 
9097 	ASSERT(IDN_GLOCK_IS_EXCL());
9098 	ASSERT(IDN_DLOCK_IS_EXCL(idn.localid));
9099 
9100 	if (idn.mboxarea == NULL) {
9101 		PR_PROTO("%s: master data already deinitialized\n", proc);
9102 		return;
9103 	}
9104 
9105 	ldp = &idn_domain[idn.localid];
9106 
9107 	PR_PROTO("%s: deinitializing master data (domid = %d)\n",
9108 	    proc, idn.localid);
9109 
9110 	ldp->dmbox.m_tbl = NULL;
9111 	idn.mboxarea = NULL;
9112 	/*
9113 	 * Master may still be holding onto slabs of his own.
9114 	 */
9115 	DSLAB_LOCK_EXCL(idn.localid);
9116 	sp = ldp->dslab;
9117 	ldp->dslab = NULL;
9118 	ldp->dnslabs = 0;
9119 	if (sp)
9120 		smr_slab_free(idn.localid, sp);
9121 	ldp->dslab_state = DSLAB_STATE_UNKNOWN;
9122 	DSLAB_UNLOCK(idn.localid);
9123 
9124 	smr_slabpool_deinit();
9125 }
9126 
9127 static int
9128 idn_mark_awol(int domid, clock_t *atime)
9129 {
9130 	clock_t		awol;
9131 	idn_domain_t	*dp = &idn_domain[domid];
9132 
9133 	ASSERT(IDN_SYNC_IS_LOCKED());
9134 	ASSERT(IDN_GLOCK_IS_EXCL());
9135 
9136 	if (!DOMAIN_IN_SET(idn.domset.ds_awol, domid)) {
9137 		DOMAINSET_ADD(idn.domset.ds_awol, domid);
9138 		idn.nawols++;
9139 	}
9140 	awol = ddi_get_lbolt();
9141 	if (dp->dawol.a_count++ == 0)
9142 		dp->dawol.a_time = awol;
9143 	dp->dawol.a_last = awol;
9144 	if ((awol - dp->dawol.a_msg) >= (clock_t)(idn_awolmsg_interval * hz))
9145 		dp->dawol.a_msg = awol;
9146 	else
9147 		awol = 0;
9148 
9149 	*atime = awol;
9150 
9151 	idn_awol_event_set(dp->dhw.dh_boardset);
9152 
9153 	return (dp->dawol.a_count);
9154 }
9155 
9156 void
9157 idn_clear_awol(int domid)
9158 {
9159 	idn_domain_t	*dp = &idn_domain[domid];
9160 
9161 	ASSERT(IDN_SYNC_IS_LOCKED());
9162 	ASSERT(IDN_GLOCK_IS_EXCL());
9163 	if (DOMAIN_IN_SET(idn.domset.ds_awol, domid)) {
9164 		DOMAINSET_DEL(idn.domset.ds_awol, domid);
9165 		idn.nawols--;
9166 	}
9167 	if (dp->dawol.a_count > 0) {
9168 		dp->dawol.a_count = 0;
9169 		dp->dawol.a_last = dp->dawol.a_time;
9170 		dp->dawol.a_time = 0;
9171 		dp->dawol.a_msg = 0;
9172 
9173 		idn_awol_event_clear(dp->dhw.dh_boardset);
9174 	}
9175 }
9176 
9177 /*
9178  * A timer expired.
9179  */
/*
 * Timeout callback for all IDN message timers.  Dequeues the
 * expired timer (unless it was already terminated), then dispatches
 * on the message type to drive the appropriate retry/recovery
 * action.  May log an AWOL warning for the domain on the way out.
 */
void
idn_timer_expired(void *arg)
{
	idn_domain_t	*dp;
	char		*op = "UNKNOWN";
	clock_t		awol = 0;
	int		awolcount, dcpu, domid;
	idn_timer_t	*tp = (idn_timer_t *)arg;
	idn_timerq_t	*tq = NULL;
	uint_t		token;
	char		dname[MAXDNAME];
	procname_t	proc = "idn_timer_expired";
	STRING(str);

	tq = tp->t_q;

	ASSERT(tp->t_domid != IDN_NIL_DOMID);

	IDN_TIMERQ_LOCK(tq);

	INUM2STR(tp->t_type, str);

	if (tp->t_onq == 0) {
		PR_TIMER("%s: timer CAUGHT TERMINATION (type = %s)\n",
		    proc, str);
		/*
		 * Timer was dequeued.  Somebody is trying
		 * to shut it down.
		 */
		IDN_TIMERQ_UNLOCK(tq);
		return;
	}

	IDN_TIMER_DEQUEUE(tq, tp);

	IDN_TIMERQ_UNLOCK(tq);

	IDN_SYNC_LOCK();
	IDN_DLOCK_EXCL(tp->t_domid);

	domid = tp->t_domid;

	dp = &idn_domain[domid];
	/*
	 * Snapshot name/cpu now for the AWOL message emitted after
	 * the locks have been dropped.
	 */
	strcpy(dname, dp->dname);
	dcpu = dp->dcpu;

	IDN_TIMER_EXEC(tp);

#ifdef DEBUG
	PR_TIMER("%s:%d: [%s] timer EXPIRED (C=0x%x, P=0x%llx, X=0x%llx)\n",
	    proc, tp->t_domid, str, tp->t_cookie,
	    tp->t_posttime, tp->t_exectime);
#endif /* DEBUG */

	/*
	 * IMPORTANT:
	 * Each case is responsible for dropping SYNC_LOCK & DLOCK.
	 */
	switch (tp->t_type) {
	case IDNP_DATA:
		IDN_SYNC_UNLOCK();
		/*
		 * Timed out waiting for a data packet response.
		 * We can't close domain since he may just be
		 * temporarily AWOL.
		 * Note that dio and diocheck do not get cleared.
		 * This is taken care of when the domain restarts
		 * or is fatally closed.
		 * We only need a reader lock for this.
		 */
		IDN_DLOCK_DOWNGRADE(domid);
		if (dp->diocheck && dp->dmbox.m_send) {
			(void) idn_reclaim_mboxdata(domid, 0, -1);
			if (dp->dio >= IDN_WINDOW_EMAX) {
				idn_msgtype_t	mt;
				/*
				 * Restart timer for another
				 * go around.
				 */
				IDN_MSGTIMER_START(domid, IDNP_DATA, 0,
				    idn_msg_waittime[IDNP_DATA],
				    &mt.mt_cookie);
			} else {
				lock_clear(&dp->diocheck);
			}
		}
		IDN_DUNLOCK(domid);
		break;

	case IDNP_NEGO:
		/*
		 * If we're not in a NEGO transition, then
		 * just ignore this timeout.
		 */
		if (dp->dxp == &xphase_nego) {
			/* (shadows the function-level 'token') */
			uint_t		token;

			IDN_GLOCK_EXCL();
			op = "CONNECT";
			awolcount = idn_mark_awol(domid, &awol);
			IDN_GUNLOCK();

			idn_nego_cleanup_check(domid, IDN_NIL_DOMID,
			    IDN_NIL_DCPU);

			/* Back to PENDING and schedule a NEGO retry. */
			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
			idn_retry_submit(idn_retry_nego, NULL, token,
			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
		}
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		break;

	case IDNP_CMD:
		/*
		 * Timeouts on commands typically mean that the
		 * the master is not responding.  Furthermore, we
		 * can't FORCE a FIN disconnect since at this stage
		 * we are CONNECTED and thus other domains may
		 * have cache entries that we're sharing with them.
		 * Only choice is to completely disconnect from
		 * IDN and try to reestablish connection.
		 *
		 * However, timeouts attempting to get nodename
		 * are not fatal.  Although we don't want to retry
		 * either since each timeout is a lost buffer to
		 * the remote domain.
		 */
		if (tp->t_subtype == (ushort_t)IDNCMD_NODENAME) {
			PR_PROTO("%s:%d: timedout waiting for nodename\n",
			    proc, domid);
			IDN_DUNLOCK(domid);
			IDN_SYNC_UNLOCK();
			break;
		}

		IDN_GLOCK_EXCL();
		if (idn.state == IDNGS_ONLINE) {
			domainset_t	domset;
			int		masterid = IDN_GET_MASTERID();

			IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
			    gk_reconfig_last);

			PR_PROTO("%s:%d: RECONFIG trying old masterid = %d\n",
			    proc, domid, masterid);

			/*
			 * Force a reconfig: unlink all transitioning
			 * and connected domains with the RELINK
			 * option so they come back afterwards.
			 */
			IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
			IDN_SET_NEW_MASTERID(masterid);
			IDN_GUNLOCK();
			IDN_DUNLOCK(domid);

			domset = idn.domset.ds_trans_on |
			    idn.domset.ds_connected;

			idn_unlink_domainset(domset, IDNFIN_NORMAL,
			    IDNFIN_ARG_NONE, IDNFIN_OPT_RELINK,	BOARDSET_ALL);
		} else {
			IDN_GUNLOCK();
			IDN_DUNLOCK(domid);
		}
		IDN_SYNC_UNLOCK();
		break;

	case IDNP_CON:
		if (tp->t_subtype == (ushort_t)IDNCON_QUERY) {
			/*
			 * Timed out sending a CON-query.  This is
			 * non-fatal.  We simply need to retry.
			 */
			IDN_GLOCK_EXCL();
			op = "CONNECT";
			awolcount = idn_mark_awol(domid, &awol);
			IDN_GUNLOCK();
			token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
			idn_retry_submit(idn_retry_query, NULL, token,
			    idn_msg_retrytime[(int)IDNRETRY_CONQ]);
			IDN_DUNLOCK(domid);
			IDN_SYNC_UNLOCK();
			break;
		}
		/*FALLTHROUGH*/
	case IDNP_CFG:
		/*
		 * Any timeouts here we simply try to disconnect
		 * and reestablish the link.  Since we haven't
		 * reached the connected state w.r.t. this domain
		 * we put his fin state to FORCE-HARD in order
		 * to shoot right through without involving other
		 * domains.  Recall that other domains may have
		 * established connections with the given domain
		 * which means any FIN queries to them will always
		 * return connected to the given domain.  Since
		 * neither the given domain nor the local domain
		 * plan on disconnecting from the IDN the connection
		 * to the other domains will remain thereby preventing
		 * the local FIN from ever completing.  Recall that
		 * a FIN depends on all member domains FIN'ing also.
		 */
		IDN_GLOCK_EXCL();
		op = "CONNECT";
		awolcount = idn_mark_awol(domid, &awol);
		IDN_GUNLOCK();
		DOMAINSET_ADD(idn.domset.ds_relink, domid);
		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
		    idn.domset.ds_relink);
		idn_disconnect(domid, IDNFIN_FORCE_SOFT,
		    IDNFIN_ARG_NONE, IDNFIN_SYNC_NO);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		break;

	case IDNP_FIN:
		/*
		 * Timeouts here simply try to retry.
		 */
		IDN_GLOCK_EXCL();
		op = "DISCONNECT";
		awolcount = idn_mark_awol(domid, &awol);
		IDN_GUNLOCK();
		if (tp->t_subtype == (ushort_t)IDNFIN_QUERY) {
			int		d;
			domainset_t	rdyset;
			/*
			 * Timed out sending a FIN-query.  This is
			 * non-fatal.  We simply need to retry.
			 * If we were doing a forced unlink of any
			 * domains, we don't want this awol guy
			 * to hold us up.  Looks for any forced
			 * unlinks and make them "ready" with
			 * respect to this awol domain.
			 */
			rdyset = 0;
			for (d = 0; d < MAX_DOMAINS; d++) {
				if (FIN_IS_FORCE(idn_domain[d].dfin)) {
					DOMAINSET_ADD(rdyset, d);
				}
			}
			if (rdyset)
				(void) idn_sync_register(domid,
				    IDNSYNC_DISCONNECT,
				    rdyset, IDNSYNC_REG_REG);

			token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
			idn_retry_submit(idn_retry_query, NULL, token,
			    idn_msg_retrytime[(int)IDNRETRY_FINQ]);
			IDN_DUNLOCK(domid);
			IDN_SYNC_UNLOCK();
			break;
		}

		/* Escalate a soft force to a hard force. */
		if (dp->dfin == IDNFIN_FORCE_SOFT) {
			IDN_FSTATE_TRANSITION(dp, IDNFIN_FORCE_HARD);
		}
		/*
		 * Anybody that was waiting on this domain and
		 * had a hard-force in action gets this guy for
		 * free in their base ready-set.
		 */
		idn_sync_register_awol(domid);

		dp->dxp = &xphase_fin;
		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
		idn_retry_submit(idn_retry_fin, NULL, token,
		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		break;

	default:

		/* Unknown timer type - should never happen. */
		ASSERT(0);
		IDN_DUNLOCK(domid);
		IDN_SYNC_UNLOCK();
		break;
	}

	IDN_TIMER_FREE(tp);

	/*
	 * awol was set (to lbolt) by idn_mark_awol() only when the
	 * message rate-limit window has passed; complain now that
	 * all locks are dropped.
	 */
	if (awol) {
		if (strlen(dname) > 0) {
			cmn_err(CE_WARN,
			    "IDN: 236: domain (%s) [ID %d] not "
			    "responding to %s [#%d]",
			    dname, domid, op, awolcount);
		} else {
			cmn_err(CE_WARN,
			    "IDN: 236: domain [ID %d, CPU %d] not "
			    "responding to %s [#%d]",
			    domid, dcpu, op, awolcount);
		}
	}
}
9475 
#if 0
/*
 * Count the retry jobs currently queued whose token matches the
 * given one.  A token encodes (domid, type); a type of
 * IDN_RETRY_TYPEALL matches any job for that domain.
 * Currently compiled out (#if 0) - debugging aid only.
 */
static int
idn_retry_check(uint_t token)
{
	int			i, count = 0;
	int			domid = IDN_RETRY_TOKEN2DOMID(token);
	int			key = IDN_RETRY_TOKEN2TYPE(token);
	idn_retry_job_t		*rp;
	idn_retry_queue_t	*qp;

	qp = &idn.retryqueue;

	mutex_enter(&qp->rq_mutex);

	/* Walk the circular job list exactly rq_count entries. */
	for (i = 0, rp = qp->rq_jobs; i < qp->rq_count; i++, rp = rp->rj_next)
		if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
		    ((key == IDN_RETRY_TYPEALL) || (rp->rj_token == token)))
			count++;

	mutex_exit(&qp->rq_mutex);

	return (count);
}
#endif /* 0 */
9500 
/*
 * Timeout callback for a previously submitted retry job.  Dequeues
 * the job from the retry queue and invokes its handler, unless the
 * job was already claimed by idn_retry_terminate().
 */
static void
idn_retry_execute(void *arg)
{
	idn_retry_job_t		*rp = (idn_retry_job_t *)arg;
	idn_retry_queue_t	*qp;

	qp = &idn.retryqueue;

	mutex_enter(&qp->rq_mutex);
	if (rp->rj_onq == 0) {
		/*
		 * Job has already been claimed by
		 * retry termination routine.
		 * Bail out.
		 */
		mutex_exit(&qp->rq_mutex);
		return;
	}
	/* Unlink from the circular doubly-linked job list. */
	rp->rj_next->rj_prev = rp->rj_prev;
	rp->rj_prev->rj_next = rp->rj_next;
	if (--(qp->rq_count) == 0)
		qp->rq_jobs = NULL;
	else if (qp->rq_jobs == rp)
		qp->rq_jobs = rp->rj_next;
	mutex_exit(&qp->rq_mutex);

	/*
	 * Invoke the handler after dropping rq_mutex; handlers may
	 * resubmit via idn_retry_submit(), which takes rq_mutex.
	 */
	(*rp->rj_func)(rp->rj_token, rp->rj_arg);

	IDNRETRY_FREEJOB(rp);
}
9531 
9532 /*
9533  *
9534  */
9535 static void
9536 idn_retry_submit(void (*func)(uint_t token, void *arg), void *arg, uint_t token,
9537     clock_t ticks)
9538 {
9539 	idn_retry_job_t		*rp, *cp;
9540 	idn_retry_queue_t	*qp;
9541 	int			c;
9542 	procname_t		proc = "idn_retry_submit";
9543 
9544 	if (ticks < 0) {
9545 		PR_PROTO("%s: (token = 0x%x) WARNING ticks = %ld\n",
9546 		    proc, token, ticks);
9547 		return;
9548 	}
9549 	if (ticks == 0)		/* At least one tick to get into background */
9550 		ticks++;
9551 
9552 	PR_PROTO("%s: token = 0x%x\n", proc, token);
9553 
9554 	qp = &idn.retryqueue;
9555 
9556 	mutex_enter(&qp->rq_mutex);
9557 	for (c = 0, cp = qp->rq_jobs; c < qp->rq_count; cp = cp->rj_next, c++) {
9558 		if (cp->rj_token == token) {
9559 			PR_PROTO("%s: token = (%d,0x%x) already present\n",
9560 			    proc, IDN_RETRY_TOKEN2DOMID(token),
9561 			    IDN_RETRY_TOKEN2TYPE(token));
9562 			break;
9563 		}
9564 	}
9565 
9566 	if (c < qp->rq_count) {
9567 		mutex_exit(&qp->rq_mutex);
9568 		return;
9569 	}
9570 
9571 	rp = IDNRETRY_ALLOCJOB();
9572 	rp->rj_func = func;
9573 	rp->rj_arg = arg;
9574 	rp->rj_token = token;
9575 	rp->rj_prev = rp->rj_next = rp;
9576 
9577 	if (qp->rq_jobs == NULL) {
9578 		qp->rq_jobs = rp;
9579 	} else {
9580 		rp->rj_next = qp->rq_jobs;
9581 		rp->rj_prev = qp->rq_jobs->rj_prev;
9582 		rp->rj_next->rj_prev = rp;
9583 		rp->rj_prev->rj_next = rp;
9584 	}
9585 	rp->rj_onq = 1;
9586 	qp->rq_count++;
9587 	rp->rj_id = timeout(idn_retry_execute, (caddr_t)rp, ticks);
9588 	mutex_exit(&qp->rq_mutex);
9589 }
9590 
/*
 * Terminate (dequeue and untimeout) all queued retry jobs matching
 * the given token.  A type of IDN_RETRY_TYPEALL matches every job
 * for the token's domain.  Returns the number of jobs terminated.
 */
int
idn_retry_terminate(uint_t token)
{
	int			i, domid;
	uint_t			key, count;
	idn_retry_job_t		*rp, *nrp, *fp;
	idn_retry_queue_t	*qp;
	procname_t		proc = "idn_retry_terminate";

	key = IDN_RETRY_TOKEN2TYPE(token);
	domid = IDN_RETRY_TOKEN2DOMID(token);
	fp = NULL;	/* singly-linked list of claimed jobs to free */
	qp = &idn.retryqueue;

	mutex_enter(&qp->rq_mutex);
	for (i = count = 0, rp = qp->rq_jobs; i < qp->rq_count; i++) {
		nrp = rp->rj_next;
		if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
		    ((key == IDN_RETRY_TYPEALL) ||
		    (rp->rj_token == token))) {
			/*
			 * Turn off onq field as a signal to
			 * the execution routine that this
			 * retry has been terminated.  This
			 * is necessary since we can't untimeout
			 * while holding the rq_mutex otherwise
			 * we'll deadlock with the execution
			 * routine.  We'll untimeout these guys
			 * _after_ we drop rq_mutex.
			 */
			rp->rj_onq = 0;
			rp->rj_next->rj_prev = rp->rj_prev;
			rp->rj_prev->rj_next = rp->rj_next;
			if (qp->rq_jobs == rp)
				qp->rq_jobs = rp->rj_next;
			rp->rj_next = fp;
			fp = rp;
			count++;
		}
		rp = nrp;
	}

	if ((qp->rq_count -= count) == 0)
		qp->rq_jobs = NULL;

	mutex_exit(&qp->rq_mutex);

	PR_PROTO("%s: token = (%d,0x%x), dequeued = %d\n",
	    proc, domid, key, count);

	/* Safe to untimeout and free the claimed jobs now. */
	for (; fp; fp = nrp) {
		(void) untimeout(fp->rj_id);

		nrp = fp->rj_next;
		IDNRETRY_FREEJOB(fp);
	}

	return (count);
}
9650 
9651 /*
9652  * -----------------------------------------------------------------------
9653  * The sole purpose of the idn_protocol_server is to manage the IDN
9654  * protocols between the various domains.  These messages do _not_ go
9655  * through the regular streams queues since they are not dependent on
9656  * any user process or module necessarily having the IDN driver open.
9657  * There may be multiple instances of these servers to enhance performance
9658  * of domain management.  Each server is assigned a idn_protoqueue_t
9659  * from which to obtain the work they need to do.
9660  * -----------------------------------------------------------------------
9661  */
/*
 * Create and start nservers protocol server threads, along with
 * their job kmem cache, the static emergency job pool, and the
 * morgue semaphore used to reap the servers at shutdown.
 * Returns the number of servers started, or -1 on failure.
 */
int
idn_protocol_init(int nservers)
{
	int		i;
	idn_protojob_t	*jp;
	register idn_protoqueue_t	*protoq;

	if (nservers <= 0) {
		cmn_err(CE_WARN,
		    "IDN: 237: invalid number (%d) of protocol servers",
		    nservers);
		return (-1);
	}

	idn.protocol.p_jobpool = kmem_cache_create("idn_protocol_jobcache",
	    sizeof (idn_protojob_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	if (idn.protocol.p_jobpool == NULL) {
		cmn_err(CE_WARN,
		    "IDN: 238: kmem_cache_create(jobcache) failed");
		return (-1);
	}

	/*
	 * Initialize static cache for protojob.
	 * The entries are chained into a NULL-terminated free list;
	 * j_cache = 1 marks them as pool (not kmem) allocations.
	 */
	mutex_init(&idn_protojob_cache_lock, NULL, MUTEX_DRIVER, NULL);
	jp = &idn_protojob_cache[0];
	for (i = 1; i < IDN_DMV_PENDING_MAX; jp = jp->j_next, i++) {
		jp->j_cache = 1;
		jp->j_next = &idn_protojob_cache[i];
	}
	jp->j_cache = 1;
	jp->j_next = NULL;
	idn_protojob_cache_list = &idn_protojob_cache[0];

	/*
	 * Init morgue semaphore.
	 */
	sema_init(&idn.protocol.p_morgue, 0, NULL, SEMA_DEFAULT, NULL);
	/*
	 * Alloc server queues.
	 */
	idn.protocol.p_serverq = GETSTRUCT(idn_protoqueue_t, nservers);

	/*
	 * Init server queues.
	 */
	protoq = idn.protocol.p_serverq;
	for (i = 0; i < nservers; protoq++, i++) {
		mutex_init(&protoq->q_mutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&protoq->q_cv, NULL, CV_DEFAULT, NULL);
		protoq->q_id	  = i;
		protoq->q_joblist = NULL;
		protoq->q_joblist_tail = NULL;
		protoq->q_die	  = 0;
		protoq->q_morgue  = &idn.protocol.p_morgue;
		/*
		 * Create protocol server thread.  Passing a nonzero
		 * len (sizeof (i)) directs thread_create() to copy
		 * the value, so reusing the loop variable's address
		 * here is safe.
		 */
		protoq->q_threadp = thread_create(NULL, 0,
		    idn_protocol_server, (caddr_t)&i, sizeof (i), &p0,
		    TS_RUN, maxclsyspri);
	}
	/*
	 * The servers are kept in the p_server[] array, however
	 * we'll build a linked list of them to facilitate debugging.
	 */
	protoq = idn.protocol.p_serverq;
	for (i = 0; i < (nservers - 1); protoq++, i++)
		protoq->q_next = (protoq + 1);
	protoq->q_next = NULL;

	idn.nservers = nservers;

	return (idn.nservers);
}
9738 
9739 void
9740 idn_protocol_deinit()
9741 {
9742 	register int	i;
9743 	int		nservers;
9744 	register idn_protoqueue_t	*protoq;
9745 
9746 	nservers = idn.nservers;
9747 
9748 	if (nservers <= 0)
9749 		return;
9750 
9751 	/*
9752 	 * Make sure the servers are dead.
9753 	 */
9754 	idn_protocol_server_killall();
9755 	ASSERT(idn.nservers == 0);
9756 	/*
9757 	 * Destroy the mutexes.
9758 	 */
9759 	protoq = idn.protocol.p_serverq;
9760 	for (i = 0; i < nservers; protoq++, i++) {
9761 		mutex_destroy(&protoq->q_mutex);
9762 		cv_destroy(&protoq->q_cv);
9763 	}
9764 	/*
9765 	 * Free up the protoqueue memory.
9766 	 */
9767 	FREESTRUCT(idn.protocol.p_serverq, idn_protoqueue_t, nservers);
9768 	idn.protocol.p_serverq = NULL;
9769 	/*
9770 	 * Destroy the morgue semaphore.
9771 	 */
9772 	sema_destroy(&idn.protocol.p_morgue);
9773 
9774 	if (idn.protocol.p_jobpool) {
9775 		kmem_cache_destroy(idn.protocol.p_jobpool);
9776 		idn.protocol.p_jobpool = NULL;
9777 	}
9778 }
9779 
/*
 * Main loop of an IDN protocol server thread.  Pulls batches of
 * protocol jobs off its assigned queue and processes them until
 * told to die, at which point it checks in at the morgue
 * semaphore and exits.
 */
static void
idn_protocol_server(int *id)
{
	idn_protoqueue_t	*pq;
	idn_protojob_t		*jl;
	register idn_protojob_t	*jp;
	procname_t		proc = "idn_protocol_server";

	if (id == NULL) {
		PR_PROTO("%s: id == NULL, thread exiting\n", proc);
		return;
	}
	ASSERT((*id >= 0) && (*id < idn_protocol_nservers));

	pq = &idn.protocol.p_serverq[*id];

	ASSERT(pq->q_id == *id);

	PR_PROTO("%s: id %d starting up (pq = 0x%p)\n", proc, pq->q_id, pq);

	/*CONSTCOND*/
	while (1) {
		mutex_enter(&pq->q_mutex);

		/* Sleep until work arrives or we're told to die. */
		while (((jl = pq->q_joblist) == NULL) && !pq->q_die)
			cv_wait(&pq->q_cv, &pq->q_mutex);

		/* Claim the entire job list in one swoop. */
		pq->q_joblist = pq->q_joblist_tail = NULL;

		if (pq->q_die) {
			/*
			 * We've been killed.  Need to check-in
			 * at the morgue.
			 */
			pq->q_threadp = NULL;
			mutex_exit(&pq->q_mutex);
			PR_PROTO("%s: thread (%d) killed...bye bye\n",
			    proc, pq->q_id);
			/* Drop any jobs claimed on the way out. */
			for (jp = jl; jp; jp = jl) {
				jl = jp->j_next;
				idn_protojob_free(jp);
			}
			sema_v(pq->q_morgue);
			thread_exit();
			/*NOTREACHED*/
		}
		mutex_exit(&pq->q_mutex);

		/*
		 * We can process the jobs asynchronously while more are
		 * put on.
		 */
		for (jp = jl; jp; jp = jl) {
			jl = jp->j_next;
			idn_recv_proto(&(jp->j_msg));
			idn_protojob_free(jp);
		}
	}
}
9839 
9840 /*
9841  * Kill off all the protocol servers.
9842  */
9843 static void
9844 idn_protocol_server_killall()
9845 {
9846 	register idn_protoqueue_t	*pq;
9847 	int		i;
9848 	procname_t	proc = "idn_protocol_server_killall";
9849 
9850 	PR_PROTO("%s: killing off %d protocol servers\n",
9851 	    proc, idn.nservers);
9852 
9853 	pq = idn.protocol.p_serverq;
9854 	for (i = 0; i < idn.nservers; pq++, i++) {
9855 		mutex_enter(&pq->q_mutex);
9856 		pq->q_die = 1;
9857 		cv_signal(&pq->q_cv);
9858 		mutex_exit(&pq->q_mutex);
9859 	}
9860 
9861 	while (idn.nservers > 0) {
9862 		sema_p(&idn.protocol.p_morgue);
9863 		idn.nservers--;
9864 	}
9865 }
9866 
9867 idn_protojob_t *
9868 idn_protojob_alloc(int kmflag)
9869 {
9870 	idn_protojob_t	*jp;
9871 
9872 	jp = kmem_cache_alloc(idn.protocol.p_jobpool, kmflag);
9873 	if (jp == NULL) {
9874 		mutex_enter(&idn_protojob_cache_lock);
9875 		if ((jp = idn_protojob_cache_list) != NULL)
9876 			idn_protojob_cache_list = jp->j_next;
9877 		mutex_exit(&idn_protojob_cache_lock);
9878 	} else {
9879 		jp->j_cache = 0;
9880 	}
9881 
9882 	return (jp);
9883 }
9884 
9885 static void
9886 idn_protojob_free(idn_protojob_t *jp)
9887 {
9888 	ASSERT(jp);
9889 
9890 	if (jp->j_cache) {
9891 		mutex_enter(&idn_protojob_cache_lock);
9892 		jp->j_next = idn_protojob_cache_list;
9893 		idn_protojob_cache_list = jp;
9894 		mutex_exit(&idn_protojob_cache_lock);
9895 	} else {
9896 		kmem_cache_free(idn.protocol.p_jobpool, (void *)jp);
9897 	}
9898 }
9899 
/*
 * Hand a protocol job to a protocol server, chosen by hashing the
 * given cookie.  If the chosen server is dying the job is freed
 * rather than queued.
 */
void
idn_protojob_submit(int cookie, idn_protojob_t *jp)
{
	idn_protoqueue_t	*pq;
	int			serverid;
	procname_t		proc = "idn_protojob_submit";
	STRING(str);

	if (jp == NULL)
		return;

	/* Hash the cookie to pick a server queue. */
	serverid = IDN_PROTOCOL_SERVER_HASH(cookie);

	pq = &idn.protocol.p_serverq[serverid];

	INUM2STR(jp->j_msg.m_msgtype, str);
	PR_PROTO("%s: job (d=%d, m=0x%x, %s) submitted to "
	    "protocol server %d\n", proc, jp->j_msg.m_domid,
	    jp->j_msg.m_msgtype, str, serverid);

	mutex_enter(&pq->q_mutex);
	/*
	 * Can't submit jobs to dying servers.
	 */
	if (!pq->q_die) {
		/* Append to the tail of the server's job list. */
		if (pq->q_joblist_tail) {
			pq->q_joblist_tail->j_next = jp;
			pq->q_joblist_tail = jp;
		} else {
			pq->q_joblist = pq->q_joblist_tail = jp;
		}
		jp->j_next = NULL;
		cv_signal(&pq->q_cv);
	} else {
		PR_PROTO("%s: protocol server dead.  freeing protojob\n",
		    proc);
		idn_protojob_free(jp);
	}
	mutex_exit(&pq->q_mutex);
}
9940 
/*
 * Initialize the headers of each mailbox table in the SMR mailbox
 * area (performed by the master).  Each header gets an identity
 * cookie, SMR offsets to the sync flags of its channel's shared
 * header, and a checksum.  Finishes by flushing the local E-caches
 * so other domains see a consistent image of the region.
 */
static void
idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls)
{
	register int	d;
	caddr_t		state_ptr = NULL, mtbasep = (caddr_t)mtp;
	idn_mboxtbl_t	*amtp;
	procname_t	proc = "idn_mboxarea_init";

	ASSERT(mtp && (ntbls > 0));

	PR_PROTO("%s: init mboxtbl (0x%p) ntbls = %d\n", proc, mtp, ntbls);

	for (d = 0; d < ntbls; d++) {
		register int	pd, sd;
		register int	ch;

		mtp->mt_header.mh_svr_active = 0;
		mtp->mt_header.mh_svr_ready = 0;
		/*
		 * Initialize the header of each mbox table
		 * with a cookie for identity.
		 */
		/*
		 * Format: 0xc0c0DSCC
		 *	 D = primary domain
		 *	 S = sub-domain of primary
		 *	CC = channel of sub-domain.
		 */
		pd = (d / MAX_DOMAINS) / IDN_MAX_NETS;
		sd = (d / IDN_MAX_NETS) % MAX_DOMAINS;
		ch = d % IDN_MAX_NETS;

		/*
		 * We point all sub-domains in the same channel
		 * to the same active sync flag since a single server
		 * services all domains in the same channel.
		 */
		amtp = IDN_MBOXTBL_ABS_PTR(mtbasep, pd, 0, ch);

		/* Store sync-flag locations as SMR-relative offsets. */
		state_ptr = (caddr_t)&amtp->mt_header.mh_svr_active;
		mtp->mt_header.mh_svr_active_ptr = IDN_ADDR2OFFSET(state_ptr);

		state_ptr = (caddr_t)&amtp->mt_header.mh_svr_ready;
		mtp->mt_header.mh_svr_ready_ptr = IDN_ADDR2OFFSET(state_ptr);

		mtp->mt_header.mh_cookie = IDN_MAKE_MBOXHDR_COOKIE(pd, sd, ch);

		/* Checksum covers the header fields just written. */
		mtp->mt_header.mh_cksum = IDN_CKSUM_MBOX(&mtp->mt_header);

		IDN_MBOXTBL_PTR_INC(mtp);
	}
	/*
	 * Now that the master has initialized the entire mailbox
	 * region the referenced memory may not necessarily be up-to-date
	 * with respect to the actual SMR memory due to caching.
	 * In order to make sure future connecting domains get a
	 * consistent picture of the mailbox region, it's necessary
	 * for the master to flush its caches.
	 */
	PR_PROTO("%s: flushing ecache's of local (master) domain\n", proc);

	idnxf_flushall_ecache();
}
10004 
10005 idn_mainmbox_t *
10006 idn_mainmbox_init(int domid, int mbx)
10007 {
10008 	idn_mainmbox_t	*mmp;
10009 	int		c;
10010 	idn_mainmbox_t	*cmp;
10011 	procname_t	proc = "idn_mainmbox_init";
10012 
10013 	ASSERT(idn_domain[domid].dcpu != IDN_NIL_DCPU);
10014 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10015 
10016 	PR_PROTO("%s: initializing main %s mailbox for domain %d\n",
10017 	    proc, IDNMBOX_IS_RECV(mbx) ? "RECV" : "SEND", domid);
10018 
10019 	cmp = GETSTRUCT(idn_mainmbox_t, IDN_MAX_NETS);
10020 	for (c = 0; c < IDN_MAX_NETS; c++) {
10021 		mmp = &cmp[c];
10022 		mmp->mm_channel = (short)c;
10023 		mutex_init(&mmp->mm_mutex, NULL, MUTEX_DRIVER, NULL);
10024 		mmp->mm_domid = (short)domid;
10025 		mmp->mm_type = mbx;
10026 	}
10027 	mmp = cmp;
10028 	/*
10029 	 * The actual SMR mailbox (mmp->mm_smr_mboxp) gets setup
10030 	 * when the SMR is setup.
10031 	 */
10032 
10033 	return (mmp);
10034 }
10035 
10036 static void
10037 idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp)
10038 {
10039 	idn_mainmbox_t	*mmp;
10040 	int		c;
10041 	procname_t	proc = "idn_mainmbox_reset";
10042 
10043 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
10044 
10045 	PR_PROTO("%s: reseting main %s mailbox for domain %d\n",
10046 	    proc, IDNMBOX_IS_RECV(cmp->mm_type) ? "RECV" : "SEND", domid);
10047 
10048 	for (c = 0; c < IDN_MAX_NETS; c++) {
10049 		mmp = &cmp[c];
10050 
10051 		mmp->mm_channel = (short)c;
10052 		mmp->mm_domid = (short)domid;
10053 		mmp->mm_count = 0;
10054 		mmp->mm_flags = 0;
10055 		mmp->mm_qiget = mmp->mm_qiput = 0;
10056 		mmp->mm_csp = NULL;
10057 		ASSERT(mmp->mm_type == cmp->mm_type);
10058 	}
10059 }
10060 
/*
 * Free the per-net main mailbox array for a domain.  The domain
 * must already be unregistered from every channel server.
 */
void
idn_mainmbox_deinit(int domid, idn_mainmbox_t *mmp)
{
	procname_t	proc = "idn_mainmbox_deinit";

	ASSERT(IDN_DLOCK_IS_HELD(domid));

	PR_PROTO("%s: deinitializing main %s mailbox for domain %d\n",
	    proc, IDNMBOX_IS_RECV(mmp->mm_type) ? "RECV" : "SEND", domid);

	/* Must not still be registered with any channel. */
	ASSERT(idn_domain_is_registered(domid, -1, NULL) == 0);

	FREESTRUCT(mmp, idn_mainmbox_t, IDN_MAX_NETS);
}
10075 
10076 static void
10077 idn_mainmbox_activate(int domid)
10078 {
10079 	register int	c;
10080 	idn_domain_t	*dp = &idn_domain[domid];
10081 	procname_t	proc = "idn_mainmbox_activate";
10082 
10083 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10084 
10085 	PR_PROTO("%s:%d: activating main mailbox\n", proc, domid);
10086 
10087 	for (c = 0; c < IDN_MAX_NETS; c++)
10088 		idn_mainmbox_chan_register(domid, &dp->dmbox.m_send[c],
10089 		    &dp->dmbox.m_recv[c], c);
10090 }
10091 
10092 /*
10093  * Called upon disabling the SMR to deactivate all the mailboxes
10094  * so that they no longer reference the SMR that's going away.
10095  *
10096  * stopall - Indicates to stop all channel services, across the board.
10097  */
10098 static void
10099 idn_mainmbox_deactivate(ushort_t domset)
10100 {
10101 	int		svr_count;
10102 	procname_t	proc = "idn_mainmbox_deactivate";
10103 
10104 
10105 	if (domset == 0)
10106 		return;
10107 
10108 	PR_PROTO("%s: %s deactivating main mailboxes for domset 0x%x\n",
10109 	    proc, (domset == (ushort_t)-1) ? "STOP-ALL" : "NORMAL", domset);
10110 
10111 	svr_count = idn_mainmbox_chan_unregister(domset, -1);
10112 
10113 	PR_PROTO("%s: deactivated %d chansvrs (domset 0x%x)\n",
10114 	    proc, svr_count, domset);
10115 }
10116 
/*
 * Attach a domain's per-channel send/recv mailbox pair to the
 * channel server for 'channel' and add the domain to that server's
 * monitored mailbox set.  The channel need not be active yet.
 */
static void
idn_mainmbox_chan_register(int domid, idn_mainmbox_t *send_mmp,
    idn_mainmbox_t *recv_mmp, int channel)
{
	ASSERT(IDN_DLOCK_IS_HELD(domid));

	/*
	 * Obtain receive mailbox lock first.
	 */
	mutex_enter(&recv_mmp->mm_mutex);
	mutex_enter(&send_mmp->mm_mutex);

	ASSERT(recv_mmp->mm_channel == (short)channel);
	ASSERT(send_mmp->mm_channel == (short)channel);

	/* Point both mailboxes at the channel server and zero stats. */
	recv_mmp->mm_csp = &idn.chan_servers[channel];
	recv_mmp->mm_count = 0;
	recv_mmp->mm_dropped = 0;
	recv_mmp->mm_flags = 0;

	send_mmp->mm_csp = &idn.chan_servers[channel];
	send_mmp->mm_count = 0;
	send_mmp->mm_dropped = 0;
	send_mmp->mm_flags = 0;

	mutex_exit(&send_mmp->mm_mutex);
	mutex_exit(&recv_mmp->mm_mutex);

	/*
	 * We have to add ourselves to the respective
	 * channel server's service table.
	 * Note that the channel may not necessarily be
	 * active at this time.
	 */
	ASSERT(idn.chan_servers);
	/*
	 * Have to get the channel server under
	 * control so we can add ourselves.
	 * Returns w/c_mutex.
	 */
	IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[channel]);
	/*
	 * Add the following domain (mailbox) for monitoring
	 * by the respective channel server.
	 */
	idn_chan_addmbox(channel, DOMAINSET(domid));

	IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[channel]);
}
10166 
10167 /*
10168  * Unregister the given domain from the specified channel(s) for monitoring.
10169  */
10170 static int
10171 idn_mainmbox_chan_unregister(ushort_t domset, int channel)
10172 {
10173 	int		c, dd_count;
10174 	int		min_chan, max_chan;
10175 	procname_t	proc = "idn_mainmbox_chan_unregister";
10176 
10177 	PR_CHAN("%s: deactivating main mailboxes (channel %d) "
10178 	    "for domset 0x%x\n", proc, channel, domset);
10179 
10180 	if (channel == -1) {
10181 		min_chan = 0;
10182 		max_chan = IDN_MAX_NETS - 1;
10183 	} else {
10184 		min_chan = max_chan = channel;
10185 	}
10186 	/*
10187 	 * Point all the data dispatchers to the same morgue
10188 	 * so we can kill them all at once.
10189 	 */
10190 	dd_count = 0;
10191 	for (c = min_chan; c <= max_chan; c++) {
10192 
10193 		/*
10194 		 * Have to get the channel server under
10195 		 * control so we can remove ourselves.
10196 		 * Returns w/c_mutex held.
10197 		 */
10198 		IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]);
10199 		/*
10200 		 * Delete the following domain (mailbox) from
10201 		 * monitoring by the respective channel server.
10202 		 */
10203 		idn_chan_delmbox(c, (ushort_t)domset);
10204 
10205 		IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
10206 		dd_count++;
10207 	}
10208 	PR_CHAN("%s: deactivated %d channel mboxes for domset 0x%x, chan %d\n",
10209 	    proc, dd_count, domset, channel);
10210 	return (dd_count);
10211 }
10212 
10213 /*
10214  * Check if the given domain is registered with the given channel(s).
10215  */
10216 int
10217 idn_domain_is_registered(int domid, int channel, idn_chanset_t *chansetp)
10218 {
10219 	int		regcount;
10220 	int		c, min_chan, max_chan;
10221 	idn_chanset_t	chanset;
10222 	procname_t	proc = "idn_domain_is_registered";
10223 
10224 	CHANSET_ZERO(chanset);
10225 
10226 	if (idn.chan_servers == NULL) {
10227 		PR_CHAN("%s: idn.chan_servers == NULL!!\n", proc);
10228 		return (0);
10229 	}
10230 
10231 	if (channel == -1) {
10232 		min_chan = 0;
10233 		max_chan = IDN_MAX_NETS - 1;
10234 	} else {
10235 		min_chan = max_chan = channel;
10236 	}
10237 
10238 	regcount = 0;
10239 
10240 	for (c = min_chan; c <= max_chan; c++) {
10241 		idn_chansvr_t	*csp;
10242 
10243 		csp = &idn.chan_servers[c];
10244 		IDN_CHAN_LOCK_SEND(csp);
10245 		/*
10246 		 * Don't really need recv side lock since registeration
10247 		 * can't change while we're holding send side.
10248 		 * No need to wait for send side to actually suspend
10249 		 * since all we want to do is prevent the registered
10250 		 * information from changing.
10251 		 */
10252 		if (IDN_CHAN_DOMAIN_IS_REGISTERED(csp, domid)) {
10253 			regcount++;
10254 			CHANSET_ADD(chanset, c);
10255 		}
10256 
10257 		IDN_CHAN_UNLOCK_SEND(csp);
10258 	}
10259 
10260 	PR_CHAN("%s: domid %d mbox reg'd with %d channels [0x%x] (req=%d)\n",
10261 	    proc, domid, regcount, chanset, channel);
10262 
10263 	if (chansetp)
10264 		*chansetp = chanset;
10265 
10266 	return (regcount);
10267 }
10268 
10269 static int
10270 idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
10271 {
10272 	register int		qi;
10273 	register idn_mboxmsg_t	*mqp;
10274 	int		total_count = 0;
10275 	int		c, count;
10276 	int		mbox_type;
10277 	char		*mbox_str;
10278 	int		lost_io, total_lost_io = 0;
10279 	idn_chanset_t	chanset;
10280 	procname_t	proc = "idn_mainmbox_flush";
10281 
10282 
10283 	if (mmp == NULL)
10284 		return (0);
10285 
10286 	CHANSET_ZERO(chanset);
10287 
10288 	mbox_type = mmp->mm_type;
10289 	ASSERT((mbox_type == IDNMMBOX_TYPE_SEND) ||
10290 	    (mbox_type == IDNMMBOX_TYPE_RECV));
10291 
10292 	mbox_str = (mbox_type == IDNMMBOX_TYPE_SEND) ? "SEND" : "RECV";
10293 
10294 	/*
10295 	 * Determine which channels this domain is registered
10296 	 * with.  If he's not registered with any, then we
10297 	 * can't touch the SMR.
10298 	 */
10299 	(void) idn_domain_is_registered(domid, -1, &chanset);
10300 
10301 	for (c = 0; c < IDN_MAX_NETS; c++) {
10302 		ushort_t	mbox_csum;
10303 
10304 		if (mmp[c].mm_smr_mboxp == NULL)
10305 			continue;
10306 		mutex_enter(&mmp[c].mm_mutex);
10307 		ASSERT(mmp[c].mm_type == mbox_type);
10308 		if (CHAN_IN_SET(chanset, c) == 0) {
10309 			/*
10310 			 * Domain is no longer registered.
10311 			 * DON'T TOUCH THE SMR - IT'S POISON!
10312 			 */
10313 			if (mmp[c].mm_smr_mboxp) {
10314 				PR_CHAN("%s:%d:%s: domain unregistered "
10315 				    "w/chan %d - DUMPING SMR reference\n",
10316 				    proc, domid, mbox_str, c);
10317 				lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
10318 				    mmp[c].mm_qiget);
10319 #ifdef DEBUG
10320 				if (mbox_type == IDNMMBOX_TYPE_RECV) {
10321 					PR_CHAN("%s:%d:%s: blowing away %d "
10322 					    "incoming pkts\n",
10323 					    proc, domid, mbox_str, lost_io);
10324 				} else {
10325 					PR_CHAN("%s:%d:%s: blowing away %d/%d "
10326 					    "outstanding pkts\n",
10327 					    proc, domid, mbox_str, lost_io,
10328 					    idn_domain[domid].dio);
10329 				}
10330 #endif /* DEBUG */
10331 			}
10332 			mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10333 			mmp[c].mm_smr_mboxp = NULL;
10334 			total_lost_io += lost_io;
10335 		}
10336 		if (mmp[c].mm_smr_mboxp) {
10337 			mbox_csum =
10338 			    IDN_CKSUM_MBOX(&mmp[c].mm_smr_mboxp->mt_header);
10339 			if (!VALID_NWRADDR(mmp[c].mm_smr_mboxp, 4) ||
10340 			    !VALID_MBOXHDR(&mmp[c].mm_smr_mboxp->mt_header,
10341 			    c, mbox_csum)) {
10342 				lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
10343 				    mmp[c].mm_qiget);
10344 #ifdef DEBUG
10345 				if (mbox_type == IDNMMBOX_TYPE_RECV) {
10346 					PR_CHAN("%s:%d:%s: bad mbox.  blowing "
10347 					    "away %d incoming pkts\n",
10348 					    proc, domid, mbox_str, lost_io);
10349 				} else {
10350 					PR_CHAN("%s:%d:%s: bad mbox.  blowing "
10351 					    "away %d/%d outstanding pkts\n",
10352 					    proc, domid, mbox_str, lost_io,
10353 					    idn_domain[domid].dio);
10354 				}
10355 #endif /* DEBUG */
10356 				mmp[c].mm_smr_mboxp = NULL;
10357 				mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10358 				total_lost_io += lost_io;
10359 			}
10360 		}
10361 		if (mmp[c].mm_smr_mboxp == NULL) {
10362 			mutex_exit(&mmp[c].mm_mutex);
10363 			continue;
10364 		}
10365 		mqp = &mmp[c].mm_smr_mboxp->mt_queue[0];
10366 		qi = 0;
10367 		count = 0;
10368 		/*
10369 		 * It's quite possible the remote domain may be accessing
10370 		 * these mailbox entries at the exact same time we're
10371 		 * clearing the owner bit.  That's okay.  All we're trying
10372 		 * to do at this point is to minimize the number of packets
10373 		 * the remote domain might try to process unnecessarily.
10374 		 */
10375 		do {
10376 			if (mqp[qi].ms_owner)
10377 				count++;
10378 			mqp[qi].ms_owner = 0;
10379 			IDN_MMBOXINDEX_INC(qi);
10380 		} while (qi);
10381 
10382 		lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput, mmp[c].mm_qiget);
10383 		total_lost_io += lost_io;
10384 
10385 		mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10386 		mmp[c].mm_smr_mboxp = NULL;
10387 		mutex_exit(&mmp[c].mm_mutex);
10388 
10389 		total_count += count;
10390 
10391 		PR_CHAN("%s:%d:%s: flushed out %d mbox entries for chan %d\n",
10392 		    proc, domid, mbox_str, count, c);
10393 	}
10394 
10395 	if (total_lost_io && (mbox_type == IDNMMBOX_TYPE_SEND)) {
10396 		int	lost_bufs;
10397 		/*
10398 		 * If we lost all our outstanding I/O.  We could
10399 		 * possible could have slabs now with mistakenly
10400 		 * outstanding I/O buffers.  Need to clean them up.
10401 		 * Clean up of leftovers our self.
10402 		 */
10403 		lost_bufs = smr_buf_free_all(domid);
10404 
10405 		PR_CHAN("%s:%d:%s: flushed %d/%d buffers from slabs\n",
10406 		    proc, domid, mbox_str, lost_bufs, total_lost_io);
10407 	}
10408 
10409 	PR_CHAN("%s:%d:%s: flushed total of %d mailbox entries (lost %d)\n",
10410 	    proc, domid, mbox_str, total_count, total_lost_io);
10411 
10412 	return (total_count);
10413 }
10414 
/*
 * Bind (cpuid >= 0) or unbind (cpuid == -1) the receive thread of
 * the given net's channel server to a specific CPU.  If the thread
 * isn't running yet, the request is recorded as pending and applied
 * when the thread activates.
 */
void
idn_chanserver_bind(int net, int cpuid)
{
	int		ocpuid;
	cpu_t		*cp;
	idn_chansvr_t	*csp;
	kthread_id_t	tp;
	procname_t	proc = "idn_chanserver_bind";

	csp = &idn.chan_servers[net];
	IDN_CHAN_LOCK_GLOBAL(csp);

	mutex_enter(&cpu_lock);		/* protect checking cpu_ready_set */
	ocpuid = csp->ch_bound_cpuid;
	cp = cpu_get(cpuid);
	/* Reject a target CPU that doesn't exist or isn't online. */
	if ((cpuid != -1) && ((cp == NULL) || !cpu_is_online(cp))) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_WARN,
		    "IDN: 239: invalid CPU ID (%d) specified for "
		    "IDN net %d",
		    cpuid, net);
		IDN_CHAN_UNLOCK_GLOBAL(csp);
		return;
	}
	if ((tp = csp->ch_recv_threadp) == NULL) {
		/*
		 * Thread is not yet active.  Record the request in
		 * ch_bound_cpuid_pending so when the thread activates
		 * it will automatically bind itself.
		 */
		csp->ch_bound_cpuid = -1;
		csp->ch_bound_cpuid_pending = cpuid;
	} else {
		/* Drop any existing affinity before setting a new one. */
		if (ocpuid != -1) {
			thread_affinity_clear(tp);
			csp->ch_bound_cpuid = -1;
		}
		if (cpuid >= 0) {
			thread_affinity_set(tp, cpuid);
			csp->ch_bound_cpuid = cpuid;
		}
		csp->ch_bound_cpuid_pending = -1;
	}
	mutex_exit(&cpu_lock);

	PR_CHAN("%s: bound net/channel (%d) from cpuid %d to%scpuid %d\n",
	    proc, net, ocpuid, tp ? " " : " (pending) ", cpuid);

	IDN_CHAN_UNLOCK_GLOBAL(csp);
}
10465 
10466 #ifdef DEBUG
10467 static idn_mboxhdr_t	*prev_mhp[IDN_MAXMAX_NETS];
10468 #endif /* DEBUG */
10469 /*
10470  * Get access to the respective channel server's synchronization
10471  * header which resides in SMR space.
10472  */
static idn_mboxhdr_t *
idn_chan_server_syncheader(int channel)
{
	idn_domain_t	*ldp = &idn_domain[idn.localid];
	idn_mboxtbl_t	*mtp;
	idn_mboxhdr_t	*mhp;
	ushort_t	mbox_csum;
	procname_t	proc = "idn_chan_server_syncheader";

	ASSERT(IDN_CHAN_RECV_IS_LOCKED(&idn.chan_servers[channel]));

	IDN_DLOCK_SHARED(idn.localid);

	if (ldp->dmbox.m_tbl == NULL) {
		/* Local mailbox table not set up yet - nothing to return. */
		PR_CHAN("%s: local dmbox.m_tbl == NULL\n", proc);
		IDN_DUNLOCK(idn.localid);
		return (NULL);
	}

	mtp = IDN_MBOXTBL_PTR_CHAN(ldp->dmbox.m_tbl, channel);
	mhp = &mtp->mt_header;
	mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);

#ifdef DEBUG
	/* Trace only when the header pointer changes for this channel. */
	if (mhp != prev_mhp[channel]) {
		prev_mhp[channel] = mhp;
		PR_CHAN("%s: chan_server (%d) cookie = 0x%x (exp 0x%x)\n",
		    proc, channel, IDN_GET_MBOXHDR_COOKIE(mhp),
		    IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel));
		PR_CHAN("%s: chan_server (%d) actv_ptr = 0x%x (exp 0x%x)\n",
		    proc, channel, mhp->mh_svr_active_ptr,
		    IDN_ADDR2OFFSET(&mhp->mh_svr_active));
		PR_CHAN("%s: chan_server (%d) ready_ptr = 0x%x (exp 0x%x)\n",
		    proc, channel, mhp->mh_svr_ready_ptr,
		    IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
		PR_CHAN("%s: chan_server (%d) mbox_cksum = 0x%x (exp 0x%x)\n",
		    proc, channel, (int)mhp->mh_cksum, (int)mbox_csum);
	}
#endif /* DEBUG */

	/*
	 * Sanity check the header's self-referential offsets and
	 * checksum; a mismatch means the SMR image is corrupted
	 * and must not be used.
	 */
	if ((IDN_ADDR2OFFSET(&mhp->mh_svr_active) !=
	    mhp->mh_svr_active_ptr) ||
	    (IDN_ADDR2OFFSET(&mhp->mh_svr_ready) != mhp->mh_svr_ready_ptr) ||
	    !VALID_MBOXHDR(mhp, channel, mbox_csum)) {
		idn_chansvr_t	*csp;

		csp = &idn.chan_servers[channel];
		/* Only report and mark the corruption once per channel. */
		if (IDN_CHANNEL_IS_RECV_CORRUPTED(csp) == 0) {
			IDN_CHANSVC_MARK_RECV_CORRUPTED(csp);

			cmn_err(CE_WARN,
			    "IDN: 240: (channel %d) SMR CORRUPTED "
			    "- RELINK", channel);
			cmn_err(CE_CONT,
			    "IDN: 240: (channel %d) cookie "
			    "(expected 0x%x, actual 0x%x)\n",
			    channel,
			    IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel),
			    mhp->mh_cookie);
			cmn_err(CE_CONT,
			    "IDN: 240: (channel %d) actv_flg "
			    "(expected 0x%x, actual 0x%x)\n",
			    channel, mhp->mh_svr_active_ptr,
			    IDN_ADDR2OFFSET(&mhp->mh_svr_active));
			cmn_err(CE_CONT,
			    "IDN: 240: (channel %d) ready_flg "
			    "(expected 0x%x, actual 0x%x)\n",
			    channel, mhp->mh_svr_ready_ptr,
			    IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
		}

		mhp = NULL;
	}
	IDN_DUNLOCK(idn.localid);

	PR_CHAN("%s: channel(%d) mainhp = 0x%p\n", proc, channel, mhp);

	return (mhp);
}
10552 
/*
 * Refresh the channel server's cached view of the domains it services.
 * If a reconfig occurred (ch_recv_changed set), promote the pending
 * scanset/domset to active and rebuild the per-domain mailbox pointer
 * cache (mmp) for this channel.  Caller must hold the channel's
 * receive lock.
 */
#define	CHANSVR_SYNC_CACHE(csp, mmp, chan) \
{ \
	ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp)); \
	if ((csp)->ch_recv_changed) { \
		register int _d; \
		(csp)->ch_recv_scanset = (csp)->ch_recv_scanset_pending; \
		(csp)->ch_recv_domset = (csp)->ch_recv_domset_pending; \
		for (_d = 0; _d < MAX_DOMAINS; _d++) { \
			if (DOMAIN_IN_SET((csp)->ch_recv_domset, _d)) { \
				(mmp)[_d] = \
				    &idn_domain[_d].dmbox.m_recv[chan]; \
			} else { \
				(mmp)[_d] = NULL; \
			} \
		} \
		(csp)->ch_recv_changed = 0; \
	} \
}
/*
 * Advance the scan index (i) round-robin (MAX_DOMAINS is a power of
 * two) and extract the next domid (d) from the scanset, which packs
 * one 4-bit domid per index position.
 */
#define	CHANSVR_NEXT_DOMID(csp, i, d) \
{ \
	(i) = ((i) + 1) & (MAX_DOMAINS - 1); \
	(d) = (int)(((csp)->ch_recv_scanset >> ((i) << 2)) & 0xf); \
}
/*
 * Prime the scan index so the first CHANSVR_NEXT_DOMID yields index 0.
 */
#define	CHANSVR_RESET_INDEX(i)	((i) = -1)
10577 
#ifdef DEBUG
/*
 * Debug builds keep the per-channel mailbox pointer caches in this
 * global so they can be inspected; non-debug builds use a local
 * array in idn_chan_server() instead.
 */
static idn_mainmbox_t	*Mmp[IDN_MAXMAX_NETS][MAX_DOMAINS];
#endif /* DEBUG */
10581 
/*
 * Channel server thread.  One instance services a single channel: it
 * round-robins over the domains registered on the channel, draining
 * inbound packets from each domain's SMR mailbox and handing them to
 * idn_recv_mboxdata().  When idle it busy-waits briefly (soft idle),
 * then backs off to timed cv waits with exponentially increasing
 * waittime, and finally to untimed (hard idle) sleeps.  The thread
 * terminates via thread_exit() once the channel is detached, after
 * signalling the morgue semaphore.
 */
static void
idn_chan_server(idn_chansvr_t **cspp)
{
	idn_mboxhdr_t	*mainhp;
	register idn_chansvr_t		*csp;
	register idn_mboxmsg_t		*mqp;
#ifdef DEBUG
	idn_mainmbox_t			**mmp;
#else
	idn_mainmbox_t			*mmp[MAX_DOMAINS];
#endif /* DEBUG */
	register int	qi;
	struct idn	*sip;
	int		channel;
	int		cpuid;
	int		empty;
	int		tot_pktcount, tot_dropcount;
	register int	index;
	register int	domid;
	register int	idleloops;
	procname_t	proc = "idn_chan_server";


#ifdef DEBUG
	mmp = &Mmp[(*cspp)->ch_id][0];
	bzero(mmp, MAX_DOMAINS * sizeof (idn_mainmbox_t *));
#else /* DEBUG */
	bzero(mmp, sizeof (mmp));
#endif /* DEBUG */

	tot_pktcount = tot_dropcount = 0;

	ASSERT(cspp && *cspp);

	csp = *cspp;
	channel = csp->ch_id;
	sip = IDN_INST2SIP(channel);
	ASSERT(sip);

	PR_CHAN("%s: CHANNEL SERVER (channel %d) GOING ACTIVE...\n",
	    proc, channel);

	IDN_CHAN_LOCK_RECV(csp);
	IDN_CHAN_RECV_INPROGRESS(csp);
	ASSERT(csp->ch_recv_threadp == curthread);
	mutex_enter(&cpu_lock);
	if ((cpuid = csp->ch_bound_cpuid_pending) != -1) {
		cpu_t	*cp = cpu_get(cpuid);
		/*
		 * We've been requested to bind to
		 * a particular cpu.
		 */
		if ((cp == NULL) || !cpu_is_online(cp)) {
			/*
			 * Cpu seems to have gone away or gone offline
			 * since originally requested.
			 */
			mutex_exit(&cpu_lock);
			cmn_err(CE_WARN,
			    "IDN: 239: invalid CPU ID (%d) specified for "
			    "IDN net %d",
			    cpuid, channel);
		} else {
			csp->ch_bound_cpuid = cpuid;
			affinity_set(csp->ch_bound_cpuid);
			mutex_exit(&cpu_lock);
		}
		csp->ch_bound_cpuid_pending = -1;
	} else {
		mutex_exit(&cpu_lock);
	}
	if (csp->ch_bound_cpuid != -1) {
		PR_CHAN("%s: thread bound to cpuid %d\n",
		    proc, csp->ch_bound_cpuid);
	}
	/*
	 * Only the first (main) mbox header is used for
	 * synchronization with data delivery since there is
	 * only one data server for all mailboxes for this
	 * given channel.
	 */
	CHANSVR_SYNC_CACHE(csp, mmp, channel);

	mainhp = ((csp->ch_recv_domcount > 0) &&
	    IDN_CHANNEL_IS_RECV_ACTIVE(csp))
	    ? idn_chan_server_syncheader(channel) : NULL;

	/* Advertise to senders that the channel server is awake. */
	if (mainhp && IDN_CHANNEL_IS_RECV_ACTIVE(csp))
		mainhp->mh_svr_active = 1;

	ASSERT(csp->ch_recv_domcount ?
	    (csp->ch_recv_scanset && csp->ch_recv_domset) : 1);

	IDN_CHAN_UNLOCK_RECV(csp);

	empty = 0;
	idleloops = 0;
	CHANSVR_RESET_INDEX(index);

	/*
	 * ---------------------------------------------
	 * Main service loop - never falls out; exits only
	 * via thread_exit() in the cc_die path.
	 * ---------------------------------------------
	 */
	/*CONSTCOND*/
	while (1) {
		register int	pktcount;
		register int	dropcount;
		ushort_t		mbox_csum;
		idn_mboxtbl_t	*smr_mboxp;	/* points to SMR space */
		register smr_offset_t	bufoffset;
#ifdef DEBUG
		register smr_pkthdr_t	*hdrp;
		idn_netaddr_t		netaddr;
#endif /* DEBUG */

		/*
		 * Speed through and find the next available domid.
		 */
		CHANSVR_NEXT_DOMID(csp, index, domid);

		if (!index) {
			/*
			 * We only check state changes when
			 * we wrap around.  Done for performance.
			 */
			if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) ||
			    csp->ch_recv.c_checkin ||
			    (idn.state != IDNGS_ONLINE)) {

				PR_DATA("%s: (channel %d) %s\n",
				    proc, channel,
				    IDN_CHANNEL_IS_DETACHED(csp)
				    ? "DEAD" :
				    IDN_CHANNEL_IS_PENDING(csp)
				    ? "IDLED" :
				    IDN_CHANNEL_IS_ACTIVE(csp)
				    ? "ACTIVE" : "DISABLED");
				goto cc_sleep;
			}
		}
		if (csp->ch_recv.c_checkin)
			goto cc_sleep;

		/*
		 * All registered domains came up empty on this pass;
		 * back off to the soft-idle spin.
		 */
		if (empty == csp->ch_recv_domcount) {
			empty = 0;
			goto cc_slowdown;
		}

		ASSERT(mmp[domid] != NULL);

		mutex_enter(&mmp[domid]->mm_mutex);
		if ((smr_mboxp = mmp[domid]->mm_smr_mboxp) == NULL) {
			/*
			 * Somebody is trying to shut things down.
			 */
			empty++;
			mutex_exit(&mmp[domid]->mm_mutex);
			continue;
		}
		ASSERT(mmp[domid]->mm_channel == (short)channel);
		/*
		 * We don't care if the mm_smr_mboxp is nullified
		 * after this point.  The thread attempting to shut
		 * us down has to formally pause this channel before
		 * anything is official anyway.  So, we can continue
		 * with our local SMR reference until the thread
		 * shutting us down really stops us.
		 *
		 * Need to get the qiget index _before_ we drop the
		 * lock since it might get flushed (idn_mainmbox_flush)
		 * once we drop the mm_mutex.
		 *
		 * We prefer not to hold the mm_mutex across the
		 * idn_recv_mboxdata() call since that may be time-
		 * consuming.
		 */
		qi  = mmp[domid]->mm_qiget;

		/*
		 * Check the mailbox header if checksum is turned on.
		 */
		mbox_csum = IDN_CKSUM_MBOX(&smr_mboxp->mt_header);
		if (!VALID_MBOXHDR(&smr_mboxp->mt_header, channel, mbox_csum)) {
			IDN_KSTAT_INC(sip, si_mboxcrc);
			IDN_KSTAT_INC(sip, si_ierrors);
			if (!(mmp[domid]->mm_flags & IDNMMBOX_FLAG_CORRUPTED)) {
				cmn_err(CE_WARN,
				    "IDN: 241: [recv] (domain %d, "
				    "channel %d) SMR CORRUPTED - RELINK",
				    domid, channel);
				mmp[domid]->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
			}
			empty = 0;
			mutex_exit(&mmp[domid]->mm_mutex);
			goto cc_sleep;
		}
		mutex_exit(&mmp[domid]->mm_mutex);
		mqp = &smr_mboxp->mt_queue[0];

		pktcount = dropcount = 0;

		/* Nothing posted at the current get index. */
		if (mqp[qi].ms_owner == 0)
			goto cc_next;

		bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);

		if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
			/* ASSERT(0); */
			/*
			 * Bad SMR offset from the remote side - flag
			 * the entry, release it, and count the drop.
			 */
			mqp[qi].ms_flag |= IDN_MBOXMSG_FLAG_ERR_BADOFFSET;
			mqp[qi].ms_owner = 0;
			IDN_MMBOXINDEX_INC(qi);
			dropcount++;

			IDN_KSTAT_INC(sip, si_smraddr);
			IDN_KSTAT_INC(sip, si_ierrors);

		} else {
			PR_DATA("%s: (channel %d) pkt (off 0x%x, "
			    "qiget %d) from domain %d\n",
			    proc, channel, bufoffset, qi, domid);
#ifdef DEBUG

			hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(bufoffset));
			netaddr.netaddr = hdrp->b_netaddr;
			ASSERT(netaddr.net.chan == (ushort_t)channel);
#endif /* DEBUG */

			/*
			 * Deliver the packet upstream; a negative
			 * return indicates the SMR buffer was bad,
			 * so mark this mailbox corrupted (once).
			 */
			if (idn_recv_mboxdata(channel,
			    IDN_OFFSET2ADDR(bufoffset)) < 0) {
				mutex_enter(&mmp[domid]->mm_mutex);
				if (!(mmp[domid]->mm_flags &
				    IDNMMBOX_FLAG_CORRUPTED)) {
					cmn_err(CE_WARN,
					    "IDN: 241: [recv] (domain "
					    "%d, channel %d) SMR "
					    "CORRUPTED - RELINK",
					    domid, channel);
					mmp[domid]->mm_flags |=
					    IDNMMBOX_FLAG_CORRUPTED;
				}
				mutex_exit(&mmp[domid]->mm_mutex);
			}

			mqp[qi].ms_owner = 0;
			IDN_MMBOXINDEX_INC(qi);
			pktcount++;
		}

cc_next:

		/*
		 * Publish the updated get index and counters back to
		 * the mailbox, but only if it hasn't been torn down
		 * while we were unlocked.
		 */
		mutex_enter(&mmp[domid]->mm_mutex);
		if (mmp[domid]->mm_smr_mboxp) {
			if (dropcount)
				mmp[domid]->mm_dropped += dropcount;
			mmp[domid]->mm_qiget = qi;
			mmp[domid]->mm_count += pktcount;
		}
		mutex_exit(&mmp[domid]->mm_mutex);

		if (pktcount == 0) {
			empty++;
		} else {
			csp->ch_recv_waittime = IDN_NETSVR_WAIT_MIN;
			empty = 0;
			idleloops = 0;

			PR_DATA("%s: (channel %d) dom=%d, pktcnt=%d\n",
			    proc, channel, domid, pktcount);
		}

		continue;

cc_slowdown:

		/* Soft idle: spin a bounded number of passes first. */
#ifdef DEBUG
		if (idleloops == 0) {
			PR_DATA("%s: (channel %d) going SOFT IDLE...\n",
			    proc, channel);
		}
#endif /* DEBUG */
		if (idleloops++ < IDN_NETSVR_SPIN_COUNT) {
			/*
			 * At this level we only busy-wait.
			 * Get back into action.
			 */
			continue;
		}
		idleloops = 0;

cc_sleep:

		/* Tell senders we're no longer actively scanning. */
		if (mainhp)
			mainhp->mh_svr_active = 0;

		IDN_CHAN_LOCK_RECV(csp);

cc_die:

		ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp));

		if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
		    IDN_CHANNEL_IS_DETACHED(csp)) {
			/*
			 * Time to die...
			 */
			PR_CHAN("%s: (channel %d) serviced %d "
			    "packets, drop = %d\n", proc, channel,
			    tot_pktcount, tot_dropcount);
			PR_CHAN("%s: (channel %d) TERMINATING\n",
			    proc, channel);
			PR_CHAN("%s: (channel %d) ch_morguep = %p\n",
			    proc, channel, csp->ch_recv_morguep);

			csp->ch_recv_threadp = NULL;
#ifdef DEBUG
			for (index = 0; index < csp->ch_recv_domcount;
			    index++) {
				if ((int)((csp->ch_recv_scanset >>
				    (index*4)) & 0xf) == domid) {
					PR_DATA("%s: WARNING (channel %d) "
					    "DROPPING domid %d...\n",
					    proc, channel, domid);
				}
			}
#endif /* DEBUG */
			IDN_CHAN_RECV_DONE(csp);

			/* Wake whoever is waiting for us in the morgue. */
			sema_v(csp->ch_recv_morguep);

			IDN_CHAN_UNLOCK_RECV(csp);

			thread_exit();
			/* not reached */
		}

		/*
		 * Sleep (timed or untimed) until the channel becomes
		 * receive-active again, honoring check-in requests
		 * and watching for detach.
		 */
		do {
			if (IDN_CHANNEL_IS_DETACHED(csp)) {
				PR_CHAN("%s: (channel %d) going to DIE...\n",
				    proc, channel);
				goto cc_die;
			}
#ifdef DEBUG
			if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
			    (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)) {
				PR_CHAN("%s: (channel %d) going SOFT IDLE "
				    "(waittime = %d ticks)...\n",
				    proc, channel,
				    csp->ch_recv_waittime);
			} else {
				PR_CHAN("%s: (channel %d) going "
				    "HARD IDLE...\n", proc, channel);
			}
#endif /* DEBUG */
			IDN_CHAN_RECV_DONE(csp);

			/*
			 * If we're being asked to check-in then
			 * go into a hard sleep.  Want to give the
			 * thread requesting us to checkin a chance.
			 */
			while (csp->ch_recv.c_checkin)
				cv_wait(&csp->ch_recv_cv,
				    &csp->ch_recv.c_mutex);

			if (csp->ch_recv_waittime > IDN_NETSVR_WAIT_MAX)
				cv_wait(&csp->ch_recv_cv,
				    &csp->ch_recv.c_mutex);
			else
				(void) cv_reltimedwait(&csp->ch_recv_cv,
				    &csp->ch_recv.c_mutex,
				    csp->ch_recv_waittime, TR_CLOCK_TICK);

			IDN_CHAN_RECV_INPROGRESS(csp);

			IDN_KSTAT_INC(sip, si_sigsvr);

			/* Exponential backoff of the timed-wait interval. */
			if (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)
				csp->ch_recv_waittime <<=
				    IDN_NETSVR_WAIT_SHIFT;

		} while (!IDN_CHANNEL_IS_RECV_ACTIVE(csp));

		/*
		 * Before we see the world (and touch SMR space),
		 * see if we've been told to die.
		 */
		mainhp = NULL;
		/*
		 * The world may have changed since we were
		 * asleep.  Need to resync cache and check for a
		 * new syncheader.
		 *
		 * Reset chansvr cache against any changes in
		 * mbox fields we need (mm_qiget).
		 */
		CHANSVR_SYNC_CACHE(csp, mmp, channel);
		if (csp->ch_recv_domcount <= 0) {
			/*
			 * Everybody disappeared on us.
			 * Go back to sleep.
			 */
			goto cc_die;
		}
		ASSERT(csp->ch_recv_scanset && csp->ch_recv_domset);

		mainhp = idn_chan_server_syncheader(channel);
		if (mainhp == NULL) {
			/*
			 * Bummer...we're idling...
			 */
			goto cc_die;
		}

		mainhp->mh_svr_active = 1;

		IDN_CHAN_UNLOCK_RECV(csp);
		/*
		 * Reset the domid index after sleeping.
		 */
		CHANSVR_RESET_INDEX(index);

		empty = 0;
		idleloops = 0;
	}
}
11006 
#if 0
/*
 * NOTE: this function is currently compiled out (#if 0) - retained
 * for reference only.
 *
 * We maintain a separate function for flushing the STREAMs
 * queue of a channel because it must be done outside the
 * context of the idn_chan_action routine.  The streams flush
 * cannot occur inline with the idn_chan_action because
 * the act of flushing may cause IDN send functions to be called
 * directly and thus locks to be obtained which could result
 * in deadlocks.
 */
static void
idn_chan_flush(idn_chansvr_t *csp)
{
	queue_t		*rq;
	struct idn	*sip;
	int		flush_type = 0;
	idn_chaninfo_t	*csend, *crecv;
	procname_t	proc = "idn_chan_flush";

	csend = &csp->ch_send;
	crecv = &csp->ch_recv;

	mutex_enter(&crecv->c_mutex);
	mutex_enter(&csend->c_mutex);

	if (crecv->c_state & IDN_CHANSVC_STATE_FLUSH)
		flush_type |= FLUSHR;

	if (csend->c_state & IDN_CHANSVC_STATE_FLUSH)
		flush_type |= FLUSHW;

	if (flush_type) {
		rq = NULL;
		rw_enter(&idn.struprwlock, RW_READER);
		if ((sip = IDN_INST2SIP(csp->ch_id)) != NULL)
			rq = sip->si_ipq;
		rw_exit(&idn.struprwlock);
		if (rq) {
			/*
			 * Flush the STREAM if possible
			 * to get the channel server coherent
			 * enough to respond to us.
			 */
			PR_CHAN("%s: sending FLUSH (%x) to channel %d\n",
			    proc, flush_type, csp->ch_id);

			(void) putnextctl1(rq, M_FLUSH, flush_type);
		}
		crecv->c_state &= ~IDN_CHANSVC_STATE_FLUSH;
		csend->c_state &= ~IDN_CHANSVC_STATE_FLUSH;

		if (crecv->c_waiters)
			cv_broadcast(&crecv->c_cv);
	}

	mutex_exit(&csend->c_mutex);
	mutex_exit(&crecv->c_mutex);
}
#endif /* 0 */
11066 
11067 /*
11068  * Locks are with respect to SEND/RECV locks (c_mutex).
11069  *
11070  * STOP/SUSPEND/DETACH
11071  *	- Entered with locks dropped, leave with locks held.
11072  *	  DETACH - lock dropped manually.
11073  * RESTART/RESUME
11074  *	- Entered with locks held, leave with locks dropped.
11075  * ATTACH
11076  *	- both enter and leave with locks dropped.
11077  */
static void
idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
{
	uchar_t		clr_state, set_state;
	uint_t		is_running;
	domainset_t	closed_slabwaiters = 0;
	struct idn	*sip;
	idn_chansvr_t	*csp;
	idn_chaninfo_t	*csend, *crecv;
	procname_t	proc = "idn_chan_action";

	ASSERT((channel >= 0) && (channel < IDN_MAX_NETS));
	ASSERT(idn.chan_servers);

	csp = &idn.chan_servers[channel];

	PR_CHAN("%s: requesting %s for channel %d\n",
	    proc, chanaction_str[(int)chanaction], channel);

	csend = &csp->ch_send;
	crecv = &csp->ch_recv;

	ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));

	clr_state = set_state = 0;

	/*
	 * The "down" actions (DETACH/STOP/SUSPEND) deliberately fall
	 * through to accumulate the state bits to clear: DETACH clears
	 * everything, STOP clears ENABLED+ACTIVE, SUSPEND clears only
	 * ACTIVE.  Likewise the "up" actions (ATTACH/RESTART/RESUME)
	 * fall through to accumulate the bits to set.
	 */
	switch (chanaction) {
	case IDNCHAN_ACTION_DETACH:
		clr_state = IDN_CHANSVC_STATE_MASK;
		/*FALLTHROUGH*/

	case IDNCHAN_ACTION_STOP:
		clr_state |= IDN_CHANSVC_STATE_ENABLED;
		/*FALLTHROUGH*/

	case IDNCHAN_ACTION_SUSPEND:
		clr_state |= IDN_CHANSVC_STATE_ACTIVE;

		/*
		 * Must maintain this locking order.
		 * Set asynchronous check-in flags.
		 */
		crecv->c_checkin = 1;
		csend->c_checkin = 1;

		is_running = 0;
		if ((csend->c_inprogress || crecv->c_inprogress) &&
		    wait && (csp->ch_recv_threadp != curthread)) {

			rw_enter(&idn.struprwlock, RW_READER);
			if ((sip = IDN_INST2SIP(channel)) != NULL) {
				/*
				 * Temporarily turn off the STREAM
				 * to give a chance to breath.
				 */
				is_running = sip->si_flags & IDNRUNNING;
				if (is_running)
					sip->si_flags &= ~IDNRUNNING;
			}
			rw_exit(&idn.struprwlock);
		}

		mutex_enter(&crecv->c_mutex);
		crecv->c_state &= ~clr_state;

		mutex_enter(&csend->c_mutex);
		csend->c_state &= ~clr_state;

		/*
		 * It's possible the channel server could come
		 * through this flow itself due to putting data upstream
		 * that ultimately turned around and came back down for
		 * sending.  If this is the case we certainly don't
		 * want to cv_wait, otherwise we'll obviously deadlock
		 * waiting for ourself.  So, only block if somebody
		 * other than the channel server we're attempting to
		 * suspend/stop.
		 */
		if (wait && (csp->ch_recv_threadp != curthread)) {
			int	do_flush = 0;

			if (csend->c_inprogress || crecv->c_inprogress)
				do_flush++;

			if (do_flush) {
				rw_enter(&idn.struprwlock, RW_READER);
				if ((sip = IDN_INST2SIP(channel)) != NULL) {
					/*
					 * Temporarily turn off the STREAM
					 * to give a chance to breath.
					 */
					if (sip->si_flags & IDNRUNNING) {
						is_running = 1;
						sip->si_flags &= ~IDNRUNNING;
					}
				}
				rw_exit(&idn.struprwlock);
			}

			/*
			 * If we have any senders in-progress
			 * it's possible they're stuck waiting
			 * down in smr_buf_alloc which may never
			 * arrive if we're in an unlink process.
			 * Rather than wait for it to timeout
			 * let's be proactive so we can disconnect
			 * asap.
			 */
			closed_slabwaiters = csp->ch_reg_domset;
			DOMAINSET_ADD(closed_slabwaiters, idn.localid);
			if (closed_slabwaiters)
				smr_slabwaiter_close(closed_slabwaiters);

			do {
				/*
				 * It's possible due to a STREAMs
				 * loopback from read queue to write queue
				 * that receiver and sender may be same
				 * thread, i.e. receiver's inprogress
				 * flag will never clear until sender's
				 * inprogress flag clears.  So, we wait
				 * for sender's inprogress first.
				 */
				while (csend->c_inprogress) {
					mutex_exit(&crecv->c_mutex);
					while (csend->c_inprogress) {
						csend->c_waiters++;
						cv_wait(&csend->c_cv,
						    &csend->c_mutex);
						csend->c_waiters--;
					}
					/*
					 * Maintain lock ordering.
					 * Eventually we will catch
					 * him due to the flag settings.
					 */
					mutex_exit(&csend->c_mutex);
					mutex_enter(&crecv->c_mutex);
					mutex_enter(&csend->c_mutex);
				}
				if (crecv->c_inprogress) {
					mutex_exit(&csend->c_mutex);
					while (crecv->c_inprogress) {
						crecv->c_waiters++;
						cv_wait(&crecv->c_cv,
						    &crecv->c_mutex);
						crecv->c_waiters--;
					}
					mutex_enter(&csend->c_mutex);
				}
			} while (csend->c_inprogress);
		}

		if (is_running) {
			/*
			 * Restore the IDNRUNNING bit in
			 * the flags to let them know the
			 * channel is still alive.
			 */
			rw_enter(&idn.struprwlock, RW_READER);
			if ((sip = IDN_INST2SIP(channel)) != NULL)
				sip->si_flags |= IDNRUNNING;
			rw_exit(&idn.struprwlock);
		}

		if (closed_slabwaiters) {
			/*
			 * We can reopen now since at this point no new
			 * slabwaiters will attempt to come in and wait.
			 */
			smr_slabwaiter_open(csp->ch_reg_domset);
		}

		crecv->c_checkin = 0;
		csend->c_checkin = 0;

		/*
		 * ALL leave with locks held.
		 */
		PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
		    proc, chanaction_str[(int)chanaction], channel);
		break;

	case IDNCHAN_ACTION_ATTACH:
		mutex_enter(&crecv->c_mutex);
		mutex_enter(&csend->c_mutex);
		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ATTACHED;
		/*FALLTHROUGH*/

	case IDNCHAN_ACTION_RESTART:
		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ENABLED;
		/*FALLTHROUGH*/

	case IDNCHAN_ACTION_RESUME:
		ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ACTIVE;

		crecv->c_state |= set_state;
		csend->c_state |= set_state;

		/*
		 * The channel server itself could come through this
		 * flow, so obviously no point in attempting to wake
		 * ourself up!.
		 */
		if (csp->ch_recv_threadp && (csp->ch_recv_threadp != curthread))
			cv_signal(&csp->ch_recv_cv);

		PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
		    proc, chanaction_str[(int)chanaction], channel);

		/*
		 * Leaves with lock released.
		 */
		mutex_exit(&csend->c_mutex);
		mutex_exit(&crecv->c_mutex);
		break;

	default:
		ASSERT(0);
		break;
	}
}
11301 
/*
 * Register the main mailboxes of the domains in 'domset' with the
 * given channel.  Updates the channel server's pending scanset/domset
 * while the channel is suspended; the server picks the changes up via
 * CHANSVR_SYNC_CACHE when it resumes.
 */
static void
idn_chan_addmbox(int channel, ushort_t domset)
{
	idn_chansvr_t	*csp;
	register int	d;
	procname_t	proc = "idn_chan_addmbox";

	PR_CHAN("%s: adding domset 0x%x main mailboxes to channel %d\n",
	    proc, domset, channel);

	ASSERT(idn.chan_servers);

	csp = &idn.chan_servers[channel];

	/*
	 * Adding domains to a channel can be
	 * asynchronous, so we don't bother waiting.
	 */
	IDN_CHANNEL_SUSPEND(channel, 0);

	/*
	 * Now we have the sending and receiving sides blocked
	 * for this channel.
	 */
	for (d = 0; d < MAX_DOMAINS; d++) {
		if (!DOMAIN_IN_SET(domset, d))
			continue;
		if (IDN_CHAN_DOMAIN_IS_REGISTERED(csp, d)) {
			/* Already registered - drop it from the set. */
			DOMAINSET_DEL(domset, d);
			continue;
		}
		IDN_CHANSVR_SCANSET_ADD_PENDING(csp, d);
		DOMAINSET_ADD(csp->ch_recv_domset_pending, d);
		IDN_CHAN_DOMAIN_REGISTER(csp, d);

		PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
		    "scanset = 0x%lx\n", proc, d, channel,
		    csp->ch_recv_scanset_pending);
		PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
		    proc, d, channel, (uint_t)csp->ch_reg_domset);

		CHECKPOINT_OPENED(IDNSB_CHKPT_CHAN,
		    idn_domain[d].dhw.dh_boardset, 1);
	}
	/* Only flag a resync if something actually changed. */
	if (domset)
		csp->ch_recv_changed = 1;

	IDN_CHANNEL_RESUME(channel);
}
11351 
/*
 * Unregister the main mailboxes of the domains in 'domset' from the
 * given channel.  Unlike idn_chan_addmbox(), this suspends the channel
 * with wait=1 since callers rely on the domains being deregistered by
 * the time we return.
 */
static void
idn_chan_delmbox(int channel, ushort_t domset)
{
	idn_chansvr_t	*csp;
	register int	d;
	procname_t	proc = "idn_chan_delmbox";

	PR_CHAN("%s: deleting domset 0x%x main mailboxes from channel %d\n",
	    proc, domset, channel);

	ASSERT(idn.chan_servers);

	csp = &idn.chan_servers[channel];

	/*
	 * Here we have to wait for the channel server
	 * as it's vital that we don't return without guaranteeing
	 * that the given domset is no longer registered.
	 */
	IDN_CHANNEL_SUSPEND(channel, 1);

	/*
	 * Now we have the sending and receiving sides blocked
	 * for this channel.
	 */
	for (d = 0; d < MAX_DOMAINS; d++) {
		if (!DOMAIN_IN_SET(domset, d))
			continue;
		if (!IDN_CHAN_DOMAIN_IS_REGISTERED(csp, d)) {
			/* Not registered - drop it from the set. */
			DOMAINSET_DEL(domset, d);
			continue;
		}
		/*
		 * This domain has a mailbox hanging on this channel.
		 * Get him out.
		 *
		 * First remove him from the receive side.
		 */
		ASSERT(csp->ch_recv_domcount > 0);
		IDN_CHANSVR_SCANSET_DEL_PENDING(csp, d);
		DOMAINSET_DEL(csp->ch_recv_domset_pending, d);
		IDN_CHAN_DOMAIN_UNREGISTER(csp, d);

		PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
		    "scanset = 0x%lx\n", proc, d, channel,
		    csp->ch_recv_scanset_pending);
		PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
		    proc, d, channel, (uint_t)csp->ch_reg_domset);

		CHECKPOINT_CLOSED(IDNSB_CHKPT_CHAN,
		    idn_domain[d].dhw.dh_boardset, 2);

	}
	/* Only flag a resync if something actually changed. */
	if (domset)
		csp->ch_recv_changed = 1;

	IDN_CHANNEL_RESUME(channel);
}
11410 
11411 static int
11412 idn_valid_etherheader(struct ether_header *ehp)
11413 {
11414 	uchar_t	*eap;
11415 
11416 	eap = &ehp->ether_dhost.ether_addr_octet[0];
11417 
11418 	if ((eap[IDNETHER_ZERO] != 0) && (eap[IDNETHER_ZERO] != 0xff))
11419 		return (0);
11420 
11421 	if ((eap[IDNETHER_COOKIE1] != IDNETHER_COOKIE1_VAL) &&
11422 	    (eap[IDNETHER_COOKIE1] != 0xff))
11423 		return (0);
11424 
11425 	if ((eap[IDNETHER_COOKIE2] != IDNETHER_COOKIE2_VAL) &&
11426 	    (eap[IDNETHER_COOKIE2] != 0xff))
11427 		return (0);
11428 
11429 	if ((eap[IDNETHER_RESERVED] != IDNETHER_RESERVED_VAL) &&
11430 	    (eap[IDNETHER_RESERVED] != 0xff))
11431 		return (0);
11432 
11433 	if (!VALID_UCHANNEL(eap[IDNETHER_CHANNEL]) &&
11434 	    (eap[IDNETHER_CHANNEL] != 0xff))
11435 		return (0);
11436 
11437 	if (!VALID_UDOMAINID(IDN_NETID2DOMID(eap[IDNETHER_NETID])) &&
11438 	    (eap[IDNETHER_NETID] != 0xff))
11439 		return (0);
11440 
11441 	return (1);
11442 }
11443 
11444 /*
11445  * Packet header has already been filled in.
11446  * RETURNS:	0
11447  *		ENOLINK
11448  *		EPROTO
11449  *		ENOSPC
11450  */
/*ARGSUSED*/
static int
idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
{
	idn_mainmbox_t	*mmp;
	idn_mboxmsg_t	*mqp;
	smr_pkthdr_t	*hdrp;
	smr_offset_t	bufoffset;
	idn_netaddr_t	dst;
	ushort_t		mbox_csum;
	int		rv = 0;
	int		pktlen, qi;
	procname_t	proc = "idn_send_mboxdata";

	mmp = idn_domain[domid].dmbox.m_send;
	if (mmp == NULL) {
		/* No send mailboxes - link is down. */
		PR_DATA("%s: dmbox.m_send == NULL\n", proc);
		IDN_KSTAT_INC(sip, si_linkdown);
		return (ENOLINK);
	}

	/* Index to this channel's send mailbox. */
	mmp += channel;
	mutex_enter(&mmp->mm_mutex);

	if (mmp->mm_smr_mboxp == NULL) {
		PR_DATA("%s: (d %d, chn %d) mm_smr_mboxp == NULL\n",
		    proc, domid, channel);
		IDN_KSTAT_INC(sip, si_linkdown);
		rv = ENOLINK;
		goto send_err;
	}
	/*
	 * Verify the mailbox header checksum before trusting the
	 * queue; mismatch means the SMR is corrupted (warn once).
	 */
	mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
	if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
		PR_DATA("%s: (d %d, chn %d) mbox hdr cksum (%d) "
		    "!= actual (%d)\n",
		    proc, domid, channel, mbox_csum,
		    mmp->mm_smr_mboxp->mt_header.mh_cksum);
		if ((mmp->mm_flags & IDNMMBOX_FLAG_CORRUPTED) == 0) {
			cmn_err(CE_WARN,
			    "IDN: 241: [send] (domain %d, "
			    "channel %d) SMR CORRUPTED - RELINK",
			    domid, channel);
			mmp->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
		}
		IDN_KSTAT_INC(sip, si_mboxcrc);
		IDN_KSTAT_INC(sip, si_oerrors);
		rv = EPROTO;
		goto send_err;
	}

	bufoffset = IDN_ADDR2OFFSET(bufp);
	hdrp	  = IDN_BUF2HDR(bufp);
	pktlen    = hdrp->b_length;
	dst.netaddr = hdrp->b_netaddr;
	ASSERT(dst.net.chan == (ushort_t)channel);

	mqp = &mmp->mm_smr_mboxp->mt_queue[0];
	qi  = mmp->mm_qiput;

	if (mqp[qi].ms_owner) {
		/* Next slot still owned by the receiver - queue full. */
		PR_DATA("%s: mailbox FULL (qiput=%d, qiget=%d)\n",
		    proc, mmp->mm_qiput, mmp->mm_qiget);
		IDN_KSTAT_INC(sip, si_txfull);
		rv = ENOSPC;
		goto send_err;
	}
	if (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) {
		smr_offset_t	recl_bufoffset;
		/*
		 * Remote domain finished with mailbox entry,
		 * however it has not been reclaimed yet.  A reclaim
		 * was done before coming into this routine, however
		 * timing may have been such that the entry became
		 * free just after the reclamation, but before
		 * entry into here.  Go ahead and reclaim this entry.
		 */
		recl_bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);

		PR_DATA("%s: attempting reclaim (domain %d) "
		    "(qiput=%d, b_off=0x%x)\n",
		    proc, domid, qi, recl_bufoffset);

		if (VALID_NWROFFSET(recl_bufoffset, IDN_SMR_BUFSIZE)) {
			int		recl;
			caddr_t		b_bufp;
			smr_pkthdr_t	*b_hdrp;

			b_bufp = IDN_OFFSET2ADDR(recl_bufoffset);
			b_hdrp = IDN_BUF2HDR(b_bufp);

			if (IDN_CKSUM_PKT(b_hdrp) != b_hdrp->b_cksum) {
				IDN_KSTAT_INC(sip, si_crc);
				IDN_KSTAT_INC(sip, si_fcs_errors);
				IDN_KSTAT_INC(sip, si_reclaim);
				IDN_KSTAT_INC(sip, si_oerrors);
			}

			recl = smr_buf_free(domid, b_bufp, b_hdrp->b_length);
#ifdef DEBUG
			if (recl == 0) {
				PR_DATA("%s: SUCCESSFULLY reclaimed buf "
				    "(domain %d)\n", proc, domid);
			} else {
				PR_DATA("%s: WARNING: reclaim failed (FREE) "
				    "(domain %d)\n", proc, domid);
			}
#endif /* DEBUG */
		} else {
			IDN_KSTAT_INC(sip, si_smraddr);
			IDN_KSTAT_INC(sip, si_reclaim);
			PR_DATA("%s: WARNING: reclaim failed (BAD OFFSET) "
			    "(domain %d)\n", proc, domid);
		}
	}

	/* Remote side dropped its ready flag - treat as link down. */
	if (*mmp->mm_smr_readyp == 0) {
		mmp->mm_qiput = qi;
		IDN_KSTAT_INC(sip, si_linkdown);
		rv = ENOLINK;
		goto send_err;
	}

	/*
	 * Post the entry: fill in frame/flag first, then hand
	 * ownership to the receiver last.
	 */
	mqp[qi].ms_flag = IDN_MBOXMSG_FLAG_RECLAIM;
	mqp[qi].ms_bframe = IDN_OFFSET2BFRAME(bufoffset);
	/* membar_stst(); */
	mqp[qi].ms_owner = 1;

	IDN_MMBOXINDEX_INC(qi);

	mmp->mm_qiput = qi;

	mmp->mm_count++;

	/*
	 * If the remote channel server is ready but not actively
	 * scanning, kick it awake with a cross-domain call.
	 */
	if ((*mmp->mm_smr_readyp) && !(*mmp->mm_smr_activep)) {
		idn_msgtype_t	mt;

		mt.mt_mtype = IDNP_DATA;
		mt.mt_atype = 0;
		IDN_KSTAT_INC(sip, si_xdcall);
		(void) IDNXDC(domid, &mt, (uint_t)dst.net.chan, 0, 0, 0);
	}
	mutex_exit(&mmp->mm_mutex);
	IDN_KSTAT_INC(sip, si_opackets);
	IDN_KSTAT_INC(sip, si_opackets64);
	IDN_KSTAT_ADD(sip, si_xmtbytes, pktlen);
	IDN_KSTAT_ADD(sip, si_obytes64, (uint64_t)pktlen);

	return (0);

send_err:
	mmp->mm_dropped++;

	mutex_exit(&mmp->mm_mutex);

	return (rv);
}
11607 
/*
 * Receive one data packet from the SMR buffer bufp destined for the
 * given channel.  The packet header is checksum- and address-validated,
 * the payload is copied into a newly allocated streams mblk and handed
 * up to idndl_read().  Returns 0 on success and also for drops the
 * caller cannot act upon (bad address, bad length, allocb failure);
 * returns -1 when the packet is corrupted (bad header checksum or
 * bad ethernet header).
 */
static int
idn_recv_mboxdata(int channel, caddr_t bufp)
{
	smr_pkthdr_t	*hdrp;
	struct idn	*sip;
	mblk_t		*mp = nilp(mblk_t);
	int		pktlen;
	int		apktlen;
	int		rv = 0;
	smr_offset_t	bufoffset;
	ushort_t	csum;
	idn_netaddr_t	dst, daddr;
	procname_t	proc = "idn_recv_mboxdata";

	hdrp = IDN_BUF2HDR(bufp);

	/*
	 * Recompute the header checksum before trusting any field.
	 */
	csum = IDN_CKSUM_PKT(hdrp);

	sip = IDN_INST2SIP(channel);
	if (sip == NULL) {
		/*
		 * No IDN instance for this channel; fall back to
		 * instance 0 so the kstat updates below still land.
		 */
		/*LINTED*/
		sip = IDN_INST2SIP(0);
	}
	ASSERT(sip);

	if (csum != hdrp->b_cksum) {
		PR_DATA("%s: bad checksum(%x) != expected(%x)\n",
		    proc, (uint_t)csum, (uint_t)hdrp->b_cksum);
		IDN_KSTAT_INC(sip, si_crc);
		IDN_KSTAT_INC(sip, si_fcs_errors);
		rv = -1;
		goto recv_err;
	}

	/*
	 * The expected destination is the local domain on this channel.
	 */
	daddr.net.chan = (ushort_t)channel;
	daddr.net.netid = (ushort_t)idn.localid;

	dst.netaddr = hdrp->b_netaddr;
	bufoffset = hdrp->b_offset;

	if (dst.netaddr != daddr.netaddr) {
		/*
		 * Misdirected packet - drop it.  Note that rv remains
		 * 0 on this path; the drop is only counted via
		 * si_ierrors below.
		 */
		PR_DATA("%s: wrong dest netaddr (0x%x), expected (0x%x)\n",
		    proc, dst.netaddr, daddr.netaddr);
		IDN_KSTAT_INC(sip, si_nolink);
		IDN_KSTAT_INC(sip, si_macrcv_errors);
		goto recv_err;
	}
	pktlen  = hdrp->b_length;
	apktlen = pktlen;

	if ((pktlen <= 0) || (pktlen > IDN_DATA_SIZE)) {
		PR_DATA("%s: invalid packet length (%d) <= 0 || > %lu\n",
		    proc, pktlen, IDN_DATA_SIZE);
		IDN_KSTAT_INC(sip, si_buff);
		IDN_KSTAT_INC(sip, si_toolong_errors);
		goto recv_err;
	}

	/*
	 * The extra IDN_ALIGNSIZE bytes allow b_rptr to be adjusted to
	 * the same alignment as the source data for an efficient bcopy.
	 */
	mp = allocb(apktlen + IDN_ALIGNSIZE, BPRI_LO);
	if (mp == nilp(mblk_t)) {
		PR_DATA("%s: allocb(pkt) failed\n", proc);
		IDN_KSTAT_INC(sip, si_allocbfail);
		IDN_KSTAT_INC(sip, si_norcvbuf);	/* MIB II */
		goto recv_err;
	}
	ASSERT(DB_TYPE(mp) == M_DATA);
	/*
	 * Copy data packet into its streams buffer.
	 * Align pointers for maximum bcopy performance.
	 */
	mp->b_rptr = (uchar_t *)IDN_ALIGNPTR(mp->b_rptr, bufoffset);
	bcopy(IDN_BUF2DATA(bufp, bufoffset), mp->b_rptr, apktlen);
	mp->b_wptr = mp->b_rptr + pktlen;

	/*
	 * Optionally sanity-check the ethernet header of the copied
	 * payload; corruption here also indicates a bad SMR buffer.
	 */
	if (IDN_CHECKSUM &&
		!idn_valid_etherheader((struct ether_header *)mp->b_rptr)) {
		freeb(mp);
		mp = nilp(mblk_t);
		PR_DATA("%s: etherheader CORRUPTED\n", proc);
		IDN_KSTAT_INC(sip, si_crc);
		IDN_KSTAT_INC(sip, si_fcs_errors);
		rv = -1;
		goto recv_err;
	}

	idndl_read(NULL, mp);

recv_err:

	if (mp == nilp(mblk_t)) {
		/*
		 * Packet never made it upstream - count it as an
		 * input error.
		 */
		IDN_KSTAT_INC(sip, si_ierrors);
	}

	return (rv);
}
11703 
11704 /*
11705  * When on shutdown path (idn_active_resources) must call
11706  * idn_mainmbox_flush() _BEFORE_ calling idn_reclaim_mboxdata()
 * for any final data.  This is necessary in case the mailboxes
11708  * have been unregistered.  If they have then idn_mainmbox_flush()
11709  * will set mm_smr_mboxp to NULL which prevents us from touching
11710  * poison SMR space.
11711  */
11712 int
11713 idn_reclaim_mboxdata(int domid, int channel, int nbufs)
11714 {
11715 	idn_mainmbox_t	*mmp;
11716 	idn_mboxmsg_t	*mqp;
11717 	smr_pkthdr_t	*hdrp;
11718 	idn_domain_t	*dp;
11719 	int		qi;
11720 	int		mi;
11721 	int		reclaim_cnt = 0;
11722 	int		free_cnt;
11723 	ushort_t	csum;
11724 	struct idn	*sip;
11725 	smr_offset_t	reclaim_list, curr, prev;
11726 	procname_t	proc = "idn_reclaim_mboxdata";
11727 
11728 
11729 	sip = IDN_INST2SIP(channel);
11730 	if (sip == NULL) {
11731 		/*LINTED*/
11732 		sip = IDN_INST2SIP(0);
11733 	}
11734 	ASSERT(sip);
11735 
11736 	dp = &idn_domain[domid];
11737 
11738 	PR_DATA("%s: requested %d buffers from domain %d\n",
11739 	    proc, nbufs, domid);
11740 
11741 	if (lock_try(&dp->dreclaim_inprogress) == 0) {
11742 		/*
11743 		 * Reclaim is already in progress, don't
11744 		 * bother.
11745 		 */
11746 		PR_DATA("%s: reclaim already in progress\n", proc);
11747 		return (0);
11748 	}
11749 
11750 	if (dp->dmbox.m_send == NULL)
11751 		return (0);
11752 
11753 	reclaim_list = curr = prev = IDN_NIL_SMROFFSET;
11754 
11755 	mi = (int)dp->dreclaim_index;
11756 	do {
11757 		ushort_t	mbox_csum;
11758 
11759 		mmp = &dp->dmbox.m_send[mi];
11760 		/* do-while continues down */
11761 		ASSERT(mmp);
11762 		if (mutex_tryenter(&mmp->mm_mutex) == 0) {
11763 			/*
11764 			 * This channel is busy, move on.
11765 			 */
11766 			IDN_MBOXCHAN_INC(mi);
11767 			continue;
11768 		}
11769 
11770 		if (mmp->mm_smr_mboxp == NULL) {
11771 			PR_DATA("%s: no smr pointer for domid %d, chan %d\n",
11772 			    proc, domid, (int)mmp->mm_channel);
11773 			ASSERT(mmp->mm_qiget == mmp->mm_qiput);
11774 			mutex_exit(&mmp->mm_mutex);
11775 			IDN_MBOXCHAN_INC(mi);
11776 			continue;
11777 		}
11778 		mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
11779 		if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
11780 			PR_DATA("%s: (d %d, chn %d) mbox hdr "
11781 			    "cksum (%d) != actual (%d)\n",
11782 			    proc, domid, (int)mmp->mm_channel, mbox_csum,
11783 			    mmp->mm_smr_mboxp->mt_header.mh_cksum);
11784 			IDN_KSTAT_INC(sip, si_mboxcrc);
11785 			IDN_KSTAT_INC(sip, si_oerrors);
11786 			mutex_exit(&mmp->mm_mutex);
11787 			IDN_MBOXCHAN_INC(mi);
11788 			continue;
11789 		}
11790 		mqp = &mmp->mm_smr_mboxp->mt_queue[0];
11791 		qi  = mmp->mm_qiget;
11792 
11793 		while (!mqp[qi].ms_owner &&
11794 		    (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) &&
11795 		    nbufs) {
11796 			idn_mboxmsg_t	*msp;
11797 			int		badbuf;
11798 
11799 			badbuf = 0;
11800 			msp = &mqp[qi];
11801 
11802 			if (msp->ms_flag & IDN_MBOXMSG_FLAG_ERRMASK) {
11803 				PR_DATA("%s: msg.flag ERROR(0x%x) (off=0x%x, "
11804 				    "domid=%d, qiget=%d)\n", proc,
11805 				    (uint_t)(msp->ms_flag &
11806 				    IDN_MBOXMSG_FLAG_ERRMASK),
11807 				    IDN_BFRAME2OFFSET(msp->ms_bframe),
11808 				    domid, qi);
11809 			}
11810 			prev = curr;
11811 			curr = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
11812 
11813 			if (!VALID_NWROFFSET(curr, IDN_SMR_BUFSIZE)) {
11814 				badbuf = 1;
11815 				IDN_KSTAT_INC(sip, si_reclaim);
11816 			} else {
11817 				/*
11818 				 * Put the buffers onto a list that will be
11819 				 * formally reclaimed down below.  This allows
11820 				 * us to free up mboxq entries as fast as
11821 				 * possible.
11822 				 */
11823 				hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(curr));
11824 				csum = IDN_CKSUM_PKT(hdrp);
11825 
11826 				if (csum != hdrp->b_cksum) {
11827 					badbuf = 1;
11828 					IDN_KSTAT_INC(sip, si_crc);
11829 					IDN_KSTAT_INC(sip, si_fcs_errors);
11830 					IDN_KSTAT_INC(sip, si_reclaim);
11831 					if (!(mmp->mm_flags &
11832 					    IDNMMBOX_FLAG_CORRUPTED)) {
11833 						cmn_err(CE_WARN,
11834 						    "IDN: 241: [send] "
11835 						    "(domain %d, channel "
11836 						    "%d) SMR CORRUPTED - "
11837 						    "RELINK",
11838 						    domid, channel);
11839 						mmp->mm_flags |=
11840 						    IDNMMBOX_FLAG_CORRUPTED;
11841 					}
11842 
11843 				} else if (reclaim_list == IDN_NIL_SMROFFSET) {
11844 					reclaim_list = curr;
11845 				} else {
11846 					caddr_t	bufp;
11847 
11848 					bufp = IDN_OFFSET2ADDR(prev);
11849 					hdrp = IDN_BUF2HDR(bufp);
11850 					hdrp->b_next = curr;
11851 				}
11852 			}
11853 
11854 			mqp[qi].ms_flag = 0;
11855 
11856 			IDN_MMBOXINDEX_INC(qi);
11857 
11858 			if (!badbuf) {
11859 				nbufs--;
11860 				reclaim_cnt++;
11861 			}
11862 
11863 			if (qi == mmp->mm_qiget)
11864 				break;
11865 		}
11866 		mmp->mm_qiget = qi;
11867 
11868 		mutex_exit(&mmp->mm_mutex);
11869 
11870 		IDN_MBOXCHAN_INC(mi);
11871 
11872 	} while ((mi != (int)dp->dreclaim_index) && nbufs);
11873 
11874 	dp->dreclaim_index = (uchar_t)mi;
11875 
11876 	if (reclaim_list != IDN_NIL_SMROFFSET) {
11877 		hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(curr));
11878 		hdrp->b_next = IDN_NIL_SMROFFSET;
11879 	}
11880 
11881 	PR_DATA("%s: reclaimed %d buffers from domain %d\n",
11882 	    proc, reclaim_cnt, domid);
11883 
11884 	if (reclaim_cnt == 0) {
11885 		lock_clear(&dp->dreclaim_inprogress);
11886 		return (0);
11887 	}
11888 
11889 	/*
11890 	 * Now actually go and reclaim (free) the buffers.
11891 	 */
11892 	free_cnt = 0;
11893 
11894 	for (curr = reclaim_list; curr != IDN_NIL_SMROFFSET; ) {
11895 		caddr_t		bufp;
11896 
11897 		bufp = IDN_OFFSET2ADDR(curr);
11898 		hdrp = IDN_BUF2HDR(bufp);
11899 		csum = IDN_CKSUM_PKT(hdrp);
11900 		if (csum != hdrp->b_cksum) {
11901 			/*
11902 			 * Once corruption is detected we
11903 			 * can't trust our list any further.
11904 			 * These buffers are effectively lost.
11905 			 */
11906 			cmn_err(CE_WARN,
11907 			    "IDN: 241: [send] (domain %d, channel %d) SMR "
11908 			    "CORRUPTED - RELINK", domid, channel);
11909 			break;
11910 		}
11911 
11912 		curr = hdrp->b_next;
11913 
11914 		if (!smr_buf_free(domid, bufp, hdrp->b_length))
11915 			free_cnt++;
11916 	}
11917 
11918 	if ((dp->dio < IDN_WINDOW_EMAX) && dp->diocheck) {
11919 		lock_clear(&dp->diocheck);
11920 		IDN_MSGTIMER_STOP(domid, IDNP_DATA, 0);
11921 	}
11922 
11923 #ifdef DEBUG
11924 	if (free_cnt != reclaim_cnt) {
11925 		PR_DATA("%s: *** WARNING *** freecnt(%d) != reclaim_cnt (%d)\n",
11926 		    proc, free_cnt, reclaim_cnt);
11927 	}
11928 #endif /* DEBUG */
11929 
11930 	lock_clear(&dp->dreclaim_inprogress);
11931 
11932 	return (reclaim_cnt);
11933 }
11934 
/*
 * Wake up the receive data server(s) (channel servers) for the given
 * channel on the given domain so they will drain their mailboxes.
 * channel may be IDN_BROADCAST_ALLCHAN to signal all channels.  When
 * a specific channel was requested and a real error occurred, a NACK
 * is sent back to the requesting domain.
 */
void
idn_signal_data_server(int domid, ushort_t channel)
{
	idn_nack_t	nacktype = 0;
	idn_domain_t	*dp;
	idn_chansvr_t	*csp;
	int		c, min_chan, max_chan;
	idn_mainmbox_t	*mmp;
	procname_t	proc = "idn_signal_data_server";


	if (domid == IDN_NIL_DOMID)
		return;

	dp = &idn_domain[domid];

	if (dp->dawol.a_count > 0) {
		/*
		 * Domain was previously AWOL, but no longer.
		 */
		IDN_SYNC_LOCK();
		IDN_GLOCK_EXCL();
		idn_clear_awol(domid);
		IDN_GUNLOCK();
		IDN_SYNC_UNLOCK();
	}
	/*
	 * Do a precheck before wasting time trying to acquire the lock.
	 */
	if ((dp->dstate != IDNDS_CONNECTED) || !IDN_DLOCK_TRY_SHARED(domid)) {
		/*
		 * Either we're not connected or somebody is busy working
		 * on the domain.  Bail on the signal for now, we'll catch
		 * it on the next go around.
		 */
		return;
	}
	/*
	 * We didn't have the drwlock on the first check of dstate,
	 * but now that we do, make sure the world hasn't changed!
	 */
	if (dp->dstate != IDNDS_CONNECTED) {
		/*
		 * If we reach here, then no connection.
		 * Send no response if this is the case.
		 */
		nacktype = IDNNACK_NOCONN;
		goto send_dresp;
	}

	/*
	 * No need to worry about locking mainmbox
	 * because we're already holding reader
	 * lock on domain, plus we're just reading
	 * fields in the mainmbox which only change
	 * (or go away) when the writer lock is
	 * held on the domain.
	 */
	if ((mmp = dp->dmbox.m_recv) == NULL) {
		/*
		 * No local mailbox.
		 */
		nacktype = IDNNACK_BADCFG;
		goto send_dresp;
	}
	if ((channel != IDN_BROADCAST_ALLCHAN) && (channel >= IDN_MAX_NETS)) {
		nacktype = IDNNACK_BADCHAN;
		goto send_dresp;
	}
	if (channel == IDN_BROADCAST_ALLCHAN) {
		PR_DATA("%s: requested signal to ALL channels on domain %d\n",
		    proc, domid);
		min_chan = 0;
		max_chan = IDN_MAX_NETS - 1;
	} else {
		PR_DATA("%s: requested signal to channel %d on domain %d\n",
		    proc, channel, domid);
		min_chan = max_chan = (int)channel;
	}
	/*
	 * Advance to the first mailbox of interest; mmp tracks the
	 * per-channel mailbox in lock-step with c below.
	 */
	mmp += min_chan;
	for (c = min_chan; c <= max_chan; mmp++, c++) {

		/*
		 * We do a quick check for a pending channel.
		 * If pending it will need activation and we rather
		 * do that through a separate (proto) thread.
		 */
		csp = &idn.chan_servers[c];

		if (csp->ch_recv.c_checkin) {
			PR_DATA("%s: chansvr (%d) for domid %d CHECK-IN\n",
			    proc, c, domid);
			continue;
		}

		if (IDN_CHAN_TRYLOCK_RECV(csp) == 0) {
			/*
			 * Failed to grab lock, server must be active.
			 */
			PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
			    proc, c, domid);
			continue;
		}

		if (IDN_CHANNEL_IS_PENDING(csp)) {
			/*
			 * Lock is pending.  Submit asynchronous
			 * job to activate and move-on.
			 */
			IDN_CHAN_UNLOCK_RECV(csp);
			idn_submit_chanactivate_job(c);
			continue;
		}

		/*
		 * If he ain't active, we ain't talkin'.
		 */
		if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) == 0) {
			IDN_CHAN_UNLOCK_RECV(csp);
			PR_DATA("%s: chansvr (%d) for domid %d inactive\n",
			    proc, c, domid);
			continue;
		}

		if (mutex_tryenter(&mmp->mm_mutex) == 0) {
			IDN_CHAN_UNLOCK_RECV(csp);
			continue;
		}

		if (mmp->mm_csp != csp) {
			/*
			 * Not registered.
			 */
			mutex_exit(&mmp->mm_mutex);
			IDN_CHAN_UNLOCK_RECV(csp);
			continue;

		}
		if (mmp->mm_smr_mboxp == NULL) {
			/*
			 * No SMR mailbox.
			 */
			mutex_exit(&mmp->mm_mutex);
			IDN_CHAN_UNLOCK_RECV(csp);
			continue;
		}
		mutex_exit(&mmp->mm_mutex);

		if (csp->ch_recv.c_inprogress) {
			/*
			 * Data server is already active.
			 */
			IDN_CHAN_UNLOCK_RECV(csp);
			PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
			    proc, c, domid);
			continue;
		}
		ASSERT(csp == &idn.chan_servers[c]);


		PR_DATA("%s: signaling data dispatcher for chan %d dom %d\n",
		    proc, c, domid);
		ASSERT(csp);
		cv_signal(&csp->ch_recv_cv);
		IDN_CHAN_UNLOCK_RECV(csp);
	}

	if (!nacktype || (channel == IDN_BROADCAST_ALLCHAN)) {
		/*
		 * If there were no real errors or we were
		 * handling multiple channels, then just
		 * return.
		 */
		IDN_DUNLOCK(domid);
		return;
	}

send_dresp:

	PR_DATA("%s: sending NACK (%s) back to domain %d (cpu %d)\n",
	    proc, idnnack_str[nacktype], domid, idn_domain[domid].dcpu);

	idn_send_dataresp(domid, nacktype);

	IDN_DUNLOCK(domid);
}
12121 
12122 /*ARGSUSED*/
12123 static int
12124 idn_recv_data(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
12125 {
12126 #ifdef DEBUG
12127 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
12128 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
12129 	procname_t	proc = "idn_recv_data";
12130 
12131 	PR_PROTO("%s:%d: DATA message received (msg = 0x%x, msgarg = 0x%x)\n",
12132 	    proc, domid, msg, msgarg);
12133 	PR_PROTO("%s:%d: xargs = (0x%x, 0x%x, 0x%x, 0x%x)\n",
12134 	    proc, domid, xargs[0], xargs[1], xargs[2], xargs[3]);
12135 #endif /* DEBUG */
12136 
12137 	return (0);
12138 }
12139 
12140 /*
12141  * Only used when sending a negative response.
12142  */
12143 static void
12144 idn_send_dataresp(int domid, idn_nack_t nacktype)
12145 {
12146 	idn_msgtype_t	mt;
12147 
12148 	ASSERT(IDN_DLOCK_IS_HELD(domid));
12149 
12150 	if (idn_domain[domid].dcpu == IDN_NIL_DCPU)
12151 		return;
12152 
12153 	mt.mt_mtype = IDNP_NACK;
12154 	mt.mt_atype = IDNP_DATA;
12155 
12156 	(void) IDNXDC(domid, &mt, (uint_t)nacktype, 0, 0, 0);
12157 }
12158 
12159 /*
12160  * Checksum routine used in checksum smr_pkthdr_t and idn_mboxhdr_t.
12161  */
12162 static ushort_t
12163 idn_cksum(register ushort_t *hdrp, register int count)
12164 {
12165 	register int		i;
12166 	register ushort_t	sum = 0;
12167 
12168 	for (i = 0; i < count; i++)
12169 		sum += hdrp[i];
12170 
12171 	sum = (sum >> 16) + (sum & 0xffff);
12172 	sum += (sum >> 16);
12173 
12174 	return (~sum);
12175 }
12176 
12177 /*
12178  * ------------------------------------------------
12179  */
12180 
/*
 * Open (attach) the given channel: zero its kstats, mark the channel
 * server attached, bump the outstanding-buffer window threshold, and
 * - if we're connected to an active master - bring the channel
 * online.  Returns 0 on success (including when the channel was
 * already open), -1 if channel is out of range.
 */
int
idn_open_channel(int channel)
{
	int		masterid;
	idn_chansvr_t	*csp;
	struct idn	*sip;
	procname_t	proc = "idn_open_channel";

	if (channel >= IDN_MAX_NETS) {
		cmn_err(CE_WARN,
		    "IDN: 242: maximum channels (%d) already open",
		    IDN_MAX_NETS);
		return (-1);
	}
	IDN_GLOCK_EXCL();

	ASSERT(idn.chan_servers != NULL);

	csp = &idn.chan_servers[channel];

	IDN_CHAN_LOCK_GLOBAL(csp);

	if (IDN_CHANNEL_IS_ATTACHED(csp)) {
		PR_CHAN("%s: channel %d already open\n", proc, channel);
		IDN_CHAN_UNLOCK_GLOBAL(csp);
		IDN_GUNLOCK();
		return (0);
	}

	/*
	 * Need to zero out the kstats now that we're activating
	 * this channel.
	 */
	for (sip = idn.sip; sip; sip = sip->si_nextp) {
		if (sip->si_dip && (ddi_get_instance(sip->si_dip) == channel)) {
			bzero(&sip->si_kstat, sizeof (sip->si_kstat));
			break;
		}
	}

	IDN_CHANSVC_MARK_ATTACHED(csp);
	idn.nchannels++;
	CHANSET_ADD(idn.chanset, channel);
	IDN_CHANNEL_ATTACH(channel);

	IDN_CHAN_UNLOCK_GLOBAL(csp);

	/*
	 * We increase our window threshold each time a channel
	 * is opened.
	 */
	ASSERT(idn.nchannels > 0);
	IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
	    ((idn.nchannels - 1) * IDN_WINDOW_INCR);

	PR_CHAN("%s: channel %d is OPEN (nchannels = %d)\n",
	    proc, channel, idn.nchannels);

	masterid = IDN_GET_MASTERID();
	IDN_GUNLOCK();

	/*
	 * Check if there is an active master to which
	 * we're connected.  If so, then activate channel.
	 */
	if (masterid != IDN_NIL_DOMID) {
		idn_domain_t	*dp;

		dp = &idn_domain[masterid];
		IDN_DLOCK_SHARED(masterid);
		if (dp->dvote.v.master && (dp->dstate == IDNDS_CONNECTED))
			(void) idn_activate_channel(CHANSET(channel),
			    IDNCHAN_ONLINE);
		IDN_DUNLOCK(masterid);
	}

	return (0);
}
12259 
/*
 * Close the given channel.  chanop selects how aggressively:
 * IDNCHAN_SOFT_CLOSE and IDNCHAN_OFFLINE merely deactivate the
 * channel's services, while IDNCHAN_HARD_CLOSE fully detaches the
 * channel, removes it from the channel set and shrinks the
 * outstanding-buffer window threshold accordingly.
 */
void
idn_close_channel(int channel, idn_chanop_t chanop)
{
	idn_chansvr_t	*csp;
	procname_t	proc = "idn_close_channel";


	ASSERT(idn.chan_servers != NULL);

	csp = &idn.chan_servers[channel];

	IDN_GLOCK_EXCL();

	IDN_CHAN_LOCK_GLOBAL(csp);
	if (IDN_CHANNEL_IS_DETACHED(csp)) {
		PR_CHAN("%s: channel %d already closed\n", proc, channel);
		IDN_CHAN_UNLOCK_GLOBAL(csp);
		IDN_GUNLOCK();
		return;
	}
	IDN_CHAN_UNLOCK_GLOBAL(csp);

	idn_deactivate_channel(CHANSET(channel), chanop);

	IDN_CHAN_LOCK_GLOBAL(csp);

	if (chanop == IDNCHAN_HARD_CLOSE) {
		idn.nchannels--;
		CHANSET_DEL(idn.chanset, channel);
		/*
		 * We increase our window threshold each time a channel
		 * is opened.
		 */
		if (idn.nchannels <= 0)
			IDN_WINDOW_EMAX = 0;
		else
			IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
			    ((idn.nchannels - 1) * IDN_WINDOW_INCR);
	}

	PR_CHAN("%s: channel %d is (%s) CLOSED (nchannels = %d)\n",
	    proc, channel,
	    (chanop == IDNCHAN_SOFT_CLOSE) ? "SOFT"
	    : (chanop == IDNCHAN_HARD_CLOSE) ? "HARD" : "OFFLINE",
	    idn.nchannels);

	IDN_CHAN_UNLOCK_GLOBAL(csp);
	IDN_GUNLOCK();
}
12309 
/*
 * Attempt to activate (or mark pending) every channel in chanset.
 * IDNCHAN_ONLINE takes the channel global lock blocking and moves
 * ATTACHED channels to PENDING; IDNCHAN_OPEN only trylocks (it runs
 * on critical data paths) and moves PENDING channels to ACTIVE by
 * starting their channel services.  Returns 0 if at least one
 * channel ended up active (or already was), non-zero otherwise
 * (including -1 when the local domain isn't online).
 */
static int
idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
{
	int		c, rv = 0;
	procname_t	proc = "idn_activate_channel";

	PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
	    proc, chanset, chanop_str[chanop]);

	if (idn.state != IDNGS_ONLINE) {
		/*
		 * Can't activate any channels unless local
		 * domain is connected and thus has a master.
		 */
		PR_CHAN("%s: local domain not connected.  no data servers\n",
		    proc);
		return (-1);
	}

	for (c = 0; c < IDN_MAX_NETS; c++) {
		idn_chansvr_t	*csp;
		idn_mboxhdr_t	*mainhp;
		struct idn	*sip;

		if (!CHAN_IN_SET(chanset, c))
			continue;
		csp = &idn.chan_servers[c];

		if (chanop == IDNCHAN_ONLINE) {
			IDN_CHAN_LOCK_GLOBAL(csp);
		} else {
			/*
			 * We don't wait to grab the global lock
			 * if IDNCHAN_OPEN since these occur along
			 * critical data paths and will be retried
			 * anyway if needed.
			 */
			if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
				PR_CHAN("%s: failed to acquire global "
				    "lock for channel %d\n",
				    proc, c);
				continue;
			}
		}

		if (!IDN_CHANNEL_IS_ATTACHED(csp)) {
			PR_CHAN("%s: channel %d NOT open\n", proc, c);
			IDN_CHAN_UNLOCK_GLOBAL(csp);
			continue;

		}

		if (IDN_CHANNEL_IS_ACTIVE(csp)) {

			PR_CHAN("%s: channel %d already active\n", proc, c);
			rv++;
			IDN_CHAN_UNLOCK_GLOBAL(csp);
			continue;

		}
		/*
		 * Channel activation can happen asynchronously.
		 */
		IDN_CHANNEL_SUSPEND(c, 0);

		if (IDN_CHANNEL_IS_PENDING(csp) && (chanop == IDNCHAN_OPEN)) {

			PR_CHAN("%s: ACTIVATING channel %d\n", proc, c);

			if (idn_activate_channel_services(c) >= 0) {
				PR_CHAN("%s: Setting channel %d ACTIVE\n",
				    proc, c);
				IDN_CHANSVC_MARK_ACTIVE(csp);
				rv++;
			}
		} else if (!IDN_CHANNEL_IS_PENDING(csp) &&
		    (chanop == IDNCHAN_ONLINE)) {
			PR_CHAN("%s: Setting channel %d PENDING\n", proc, c);

			IDN_CHANSVC_MARK_PENDING(csp);
		}
		/*
		 * Don't syncheader (i.e. touch SMR) unless
		 * channel is at least ENABLED.  For a DISABLED
		 * channel, the SMR may be invalid so do NOT
		 * touch it.
		 */
		if (IDN_CHANNEL_IS_ENABLED(csp) &&
		    ((mainhp = idn_chan_server_syncheader(c)) != NULL)) {
			PR_CHAN("%s: marking chansvr (mhp=0x%p) %d READY\n",
			    proc, mainhp, c);
			mainhp->mh_svr_ready = 1;
		}

		IDN_CHANNEL_RESUME(c);
		sip = IDN_INST2SIP(c);
		ASSERT(sip);
		if (sip->si_wantw) {
			/*
			 * Writers were blocked waiting on this
			 * channel; re-enable them now.
			 */
			mutex_enter(&idn.sipwenlock);
			idndl_wenable(sip);
			mutex_exit(&idn.sipwenlock);
		}
		IDN_CHAN_UNLOCK_GLOBAL(csp);

	}
	/*
	 * Returns "not active", i.e. value of 0 indicates
	 * no channels are activated.
	 */
	return (rv == 0);
}
12421 
/*
 * Deactivate every channel in chanset according to chanop:
 * IDNCHAN_OFFLINE idles and disables the channel, IDNCHAN_HARD_CLOSE
 * detaches it entirely, and anything else (soft close) merely idles
 * it while leaving it attached.  The channel server threads are then
 * signaled (and, for hard close, terminated) via
 * idn_deactivate_channel_services().
 */
static void
idn_deactivate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
{
	int		c;
	procname_t	proc = "idn_deactivate_channel";

	PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
	    proc, chanset, chanop_str[chanop]);

	for (c = 0; c < IDN_MAX_NETS; c++) {
		idn_chansvr_t	*csp;
		idn_mboxhdr_t	*mainhp;

		if (!CHAN_IN_SET(chanset, c))
			continue;

		csp = &idn.chan_servers[c];

		IDN_CHAN_LOCK_GLOBAL(csp);

		/*
		 * Skip channels already at (or beyond) the state
		 * this chanop would put them in.
		 */
		if (((chanop == IDNCHAN_SOFT_CLOSE) &&
		    !IDN_CHANNEL_IS_ACTIVE(csp)) ||
		    ((chanop == IDNCHAN_HARD_CLOSE) &&
		    IDN_CHANNEL_IS_DETACHED(csp)) ||
		    ((chanop == IDNCHAN_OFFLINE) &&
		    !IDN_CHANNEL_IS_ENABLED(csp))) {

			ASSERT(!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
			ASSERT(!IDN_CHANNEL_IS_SEND_ACTIVE(csp));

			PR_CHAN("%s: channel %d already deactivated\n",
			    proc, c);
			IDN_CHAN_UNLOCK_GLOBAL(csp);
			continue;
		}

		switch (chanop) {
		case IDNCHAN_OFFLINE:
			IDN_CHANSVC_MARK_IDLE(csp);
			IDN_CHANSVC_MARK_DISABLED(csp);
			IDN_CHANNEL_STOP(c, 1);
			mainhp = idn_chan_server_syncheader(c);
			if (mainhp != NULL)
				mainhp->mh_svr_ready = 0;
			break;

		case IDNCHAN_HARD_CLOSE:
			IDN_CHANSVC_MARK_DETACHED(csp);
			IDN_CHANNEL_DETACH(c, 1);
			mainhp = idn_chan_server_syncheader(c);
			if (mainhp != NULL)
				mainhp->mh_svr_ready = 0;
			break;

		default:
			IDN_CHANSVC_MARK_IDLE(csp);
			IDN_CHANNEL_SUSPEND(c, 1);
			ASSERT(IDN_CHANNEL_IS_ATTACHED(csp));
			break;
		}

		lock_clear(&csp->ch_actvlck);
		lock_clear(&csp->ch_initlck);

		PR_CHAN("%s: DEACTIVATING channel %d (%s)\n", proc, c,
		    chanop_str[chanop]);
		PR_CHAN("%s: removing chanset 0x%x data svrs for "
		    "each domain link\n", proc, chanset);

		(void) idn_deactivate_channel_services(c, chanop);
	}
	/*
	 * Returns with channels unlocked.
	 */
}
12497 
12498 /*
12499  * The priority of the channel server must be less than that
12500  * of the protocol server since the protocol server tasks
12501  * are (can be) of more importance.
12502  *
12503  * Possible range: 60-99.
12504  */
12505 static pri_t	idn_chansvr_pri = (7 * MAXCLSYSPRI) / 8;
12506 
/*
 * Activate (or re-activate) the receive data dispatcher thread for
 * the given channel.  If a server thread already exists from an
 * earlier stint it is simply signaled awake; otherwise the channel
 * server state is initialized and a fresh kernel thread is created.
 * Returns 0 on success, -1 if the channel is not in the PENDING
 * state.  Caller must hold the channel's global and local locks.
 */
static int
idn_activate_channel_services(int channel)
{
	idn_chansvr_t	*csp;
	procname_t	proc = "idn_activate_channel_services";


	ASSERT((channel >= 0) && (channel < IDN_MAX_NETS));

	csp = &idn.chan_servers[channel];

	ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
	ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));

	if (csp->ch_recv_threadp) {
		/*
		 * There's an existing dispatcher!
		 * Must have been idle'd during an earlier
		 * stint.
		 */
		ASSERT(csp->ch_id == (uchar_t)channel);
		PR_CHAN("%s: existing chansvr FOUND for (c=%d)\n",
		    proc, channel);

		if (IDN_CHANNEL_IS_PENDING(csp) == 0)
			return (-1);

		PR_CHAN("%s: chansvr (c=%d) Rstate = 0x%x, Sstate = 0x%x\n",
		    proc, channel, csp->ch_recv.c_state,
		    csp->ch_send.c_state);

		cv_signal(&csp->ch_recv_cv);

		return (0);
	}

	if (IDN_CHANNEL_IS_PENDING(csp) == 0)
		return (-1);

	csp->ch_id = (uchar_t)channel;

	PR_CHAN("%s: init channel %d server\n", proc, channel);

	/*
	 * Morgue semaphore used during shutdown to wait for the
	 * dispatcher thread to terminate.
	 */
	csp->ch_recv_morguep = GETSTRUCT(ksema_t, 1);
	sema_init(csp->ch_recv_morguep, 0, NULL, SEMA_DRIVER, NULL);

	csp->ch_recv.c_inprogress = 0;
	csp->ch_recv.c_waiters = 0;
	csp->ch_recv.c_checkin = 0;
	csp->ch_recv_changed = 1;

	csp->ch_recv_domset = csp->ch_reg_domset;

	csp->ch_recv_waittime = IDN_NETSVR_WAIT_MIN;

	/*
	 * NOTE(review): &csp with sizeof (csp) is passed so that
	 * thread_create() hands the new thread its own copy of the
	 * csp pointer (arg copied by length) - presumably why the
	 * address of the local is safe here; confirm against the
	 * thread_create() contract.
	 */
	csp->ch_recv_threadp = thread_create(NULL, 0,
	    idn_chan_server, &csp, sizeof (csp), &p0, TS_RUN, idn_chansvr_pri);

	csp->ch_send.c_inprogress = 0;
	csp->ch_send.c_waiters = 0;
	csp->ch_send.c_checkin = 0;

	return (0);
}
12571 
12572 /*
12573  * This routine can handle terminating a set of channel
12574  * servers all at once, however currently only used
12575  * for serial killing, i.e. one-at-a-time.
12576  *
12577  * Entered with RECV locks held on chanset.
12578  * Acquires SEND locks if needed.
12579  * Leaves with all RECV and SEND locks dropped.
12580  */
12581 static int
12582 idn_deactivate_channel_services(int channel, idn_chanop_t chanop)
12583 {
12584 	idn_chansvr_t	*csp;
12585 	int		cs_count;
12586 	int		c;
12587 	idn_chanset_t	chanset;
12588 	ksema_t		*central_morguep = NULL;
12589 	procname_t	proc = "idn_deactivate_channel_services";
12590 
12591 
12592 	ASSERT(idn.chan_servers);
12593 
12594 	PR_CHAN("%s: deactivating channel %d services\n", proc, channel);
12595 
12596 	/*
12597 	 * XXX
12598 	 * Old code allowed us to deactivate multiple channel
12599 	 * servers at once.  Keep for now just in case.
12600 	 */
12601 	chanset = CHANSET(channel);
12602 
12603 	/*
12604 	 * Point all the data dispatchers to the same morgue
12605 	 * so we can kill them all at once.
12606 	 */
12607 	cs_count = 0;
12608 	for (c = 0; c < IDN_MAX_NETS; c++) {
12609 		if (!CHAN_IN_SET(chanset, c))
12610 			continue;
12611 
12612 		csp = &idn.chan_servers[c];
12613 		ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
12614 		ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
12615 
12616 		if (csp->ch_recv_threadp == NULL) {
12617 			/*
12618 			 * No channel server home.
12619 			 * But we're still holding the c_mutex.
12620 			 * At mark him idle incase we start him up.
12621 			 */
12622 			PR_CHAN("%s: no channel server found for chan %d\n",
12623 			    proc, c);
12624 			IDN_CHAN_UNLOCK_LOCAL(csp);
12625 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12626 			continue;
12627 		}
12628 		ASSERT(csp->ch_id == (uchar_t)c);
12629 
12630 		/*
12631 		 * Okay, now we've blocked the send and receive sides.
12632 		 */
12633 
12634 		if ((chanop == IDNCHAN_SOFT_CLOSE) ||
12635 		    (chanop == IDNCHAN_OFFLINE)) {
12636 			/*
12637 			 * We set turned off the ACTIVE flag, but there's
12638 			 * no guarantee he stopped because of it.  He may
12639 			 * have already been sleeping.  We need to be
12640 			 * sure he recognizes the IDLE, so we need to
12641 			 * signal him and give him a chance to see it.
12642 			 */
12643 			cv_signal(&csp->ch_recv_cv);
12644 			IDN_CHAN_UNLOCK_LOCAL(csp);
12645 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12646 			cs_count++;
12647 			continue;
12648 		}
12649 
12650 		PR_CHAN("%s: pointing chansvr %d to morgue (0x%p)\n",
12651 		    proc, c, central_morguep ? central_morguep
12652 		    : csp->ch_recv_morguep);
12653 
12654 		if (central_morguep == NULL) {
12655 			central_morguep = csp->ch_recv_morguep;
12656 		} else {
12657 			sema_destroy(csp->ch_recv_morguep);
12658 			FREESTRUCT(csp->ch_recv_morguep, ksema_t, 1);
12659 
12660 			csp->ch_recv_morguep = central_morguep;
12661 		}
12662 		cv_signal(&csp->ch_recv_cv);
12663 		if (csp->ch_recv.c_waiters > 0)
12664 			cv_broadcast(&csp->ch_recv.c_cv);
12665 		/*
12666 		 * Save any existing binding for next reincarnation.
12667 		 * Note that we're holding the local and global
12668 		 * locks so we're protected against others touchers
12669 		 * of the ch_bound_cpuid fields.
12670 		 */
12671 		csp->ch_bound_cpuid_pending = csp->ch_bound_cpuid;
12672 		csp->ch_bound_cpuid = -1;
12673 		IDN_CHAN_UNLOCK_LOCAL(csp);
12674 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12675 		cs_count++;
12676 	}
12677 	PR_CHAN("%s: signaled %d chansvrs for chanset 0x%x\n",
12678 	    proc, cs_count, chanset);
12679 
12680 	if ((chanop == IDNCHAN_SOFT_CLOSE) || (chanop == IDNCHAN_OFFLINE))
12681 		return (cs_count);
12682 
12683 	PR_CHAN("%s: waiting for %d (chnset=0x%x) chan svrs to term\n",
12684 	    proc, cs_count, chanset);
12685 	PR_CHAN("%s: morguep = 0x%p\n", proc, central_morguep);
12686 
12687 	ASSERT((cs_count > 0) ? (central_morguep != NULL) : 1);
12688 	while (cs_count-- > 0)
12689 		sema_p(central_morguep);
12690 
12691 	if (central_morguep) {
12692 		sema_destroy(central_morguep);
12693 		FREESTRUCT(central_morguep, ksema_t, 1);
12694 	}
12695 
12696 	return (cs_count);
12697 }
12698 
12699 int
12700 idn_chanservers_init()
12701 {
12702 	int		c;
12703 	idn_chansvr_t	*csp;
12704 
12705 
12706 	if (idn.chan_servers)
12707 		return (0);
12708 
12709 	idn.chan_servers = GETSTRUCT(idn_chansvr_t, IDN_MAXMAX_NETS);
12710 
12711 	for (c = 0; c < IDN_MAXMAX_NETS; c++) {
12712 		csp = &idn.chan_servers[c];
12713 		mutex_init(&csp->ch_send.c_mutex, NULL, MUTEX_DEFAULT, NULL);
12714 		mutex_init(&csp->ch_recv.c_mutex, NULL, MUTEX_DEFAULT, NULL);
12715 		cv_init(&csp->ch_send.c_cv, NULL, CV_DRIVER, NULL);
12716 		cv_init(&csp->ch_recv.c_cv, NULL, CV_DRIVER, NULL);
12717 		cv_init(&csp->ch_recv_cv, NULL, CV_DRIVER, NULL);
12718 		csp->ch_bound_cpuid = -1;
12719 		csp->ch_bound_cpuid_pending = -1;
12720 	}
12721 
12722 	return (c);
12723 }
12724 
12725 void
12726 idn_chanservers_deinit()
12727 {
12728 	int		c;
12729 	idn_chansvr_t	*csp;
12730 
12731 
12732 	if (idn.chan_servers == NULL)
12733 		return;
12734 
12735 	for (c = 0; c < IDN_MAXMAX_NETS; c++) {
12736 		csp = &idn.chan_servers[c];
12737 
12738 		mutex_destroy(&csp->ch_send.c_mutex);
12739 		mutex_destroy(&csp->ch_recv.c_mutex);
12740 		cv_destroy(&csp->ch_send.c_cv);
12741 		cv_destroy(&csp->ch_recv.c_cv);
12742 		cv_destroy(&csp->ch_recv_cv);
12743 	}
12744 
12745 	FREESTRUCT(idn.chan_servers, idn_chansvr_t, IDN_MAXMAX_NETS);
12746 	idn.chan_servers = NULL;
12747 }
12748 
12749 static void
12750 idn_exec_chanactivate(void *chn)
12751 {
12752 	int		not_active, channel;
12753 	idn_chansvr_t	*csp;
12754 
12755 	channel = (int)(uintptr_t)chn;
12756 
12757 	IDN_GLOCK_SHARED();
12758 	if (idn.chan_servers == NULL) {
12759 		IDN_GUNLOCK();
12760 		return;
12761 	}
12762 	csp = &idn.chan_servers[channel];
12763 
12764 	if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
12765 		/*
12766 		 * If we can't grab the global lock, then
12767 		 * something is up, skip out.
12768 		 */
12769 		IDN_GUNLOCK();
12770 		return;
12771 	}
12772 	IDN_GUNLOCK();
12773 
12774 	if (IDN_CHANNEL_IS_PENDING(csp) && lock_try(&csp->ch_actvlck)) {
12775 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12776 		not_active = idn_activate_channel(CHANSET(channel),
12777 		    IDNCHAN_OPEN);
12778 		if (not_active)
12779 			lock_clear(&csp->ch_actvlck);
12780 	} else {
12781 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12782 	}
12783 }
12784 
12785 /*
12786  * Delayed activation of channel.  We don't want to do this within
12787  * idn_signal_data_server() since that's called within the context
12788  * of an XDC handler so we submit it as a timeout() call to be short
12789  * as soon as possible.
12790  * The ch_initlck & ch_actvlck are used to synchronize activation
12791  * of the channel so that we don't have multiple idn_activate_channel's
12792  * attempting to activate the same channel.
12793  */
12794 static void
12795 idn_submit_chanactivate_job(int channel)
12796 {
12797 	idn_chansvr_t	*csp;
12798 
12799 	if (idn.chan_servers == NULL)
12800 		return;
12801 	csp = &idn.chan_servers[channel];
12802 
12803 	if (lock_try(&csp->ch_initlck) == 0)
12804 		return;
12805 
12806 	(void) timeout(idn_exec_chanactivate, (caddr_t)(uintptr_t)channel, 1);
12807 }
12808 
/*ARGSUSED0*/
static void
idn_xmit_monitor(void *unused)
{
	int		c, d;
	idn_chansvr_t	*csp;
	idn_chanset_t	wake_set;	/* channels we managed to re-enable */
	domainset_t	conset;
	smr_slab_t	*sp;
	procname_t	proc = "idn_xmit_monitor";

	/*
	 * Timeout-driven monitor: tries to free up SMR buffers and
	 * re-enable the write queues of channels recorded in
	 * idn.xmit_chanset_wanted.  Reschedules itself until every
	 * wanted channel has been woken.  idn.xmit_lock is held for
	 * the entire body.
	 */
	CHANSET_ZERO(wake_set);

	mutex_enter(&idn.xmit_lock);
	/*
	 * If we've been cancelled (xmit_tid cleared) or nobody wants
	 * a wakeup anymore, drop out without rescheduling.
	 */
	if ((idn.xmit_tid == NULL) || !idn.xmit_chanset_wanted) {
		idn.xmit_tid = NULL;
		mutex_exit(&idn.xmit_lock);
		PR_XMON("%s: bailing out\n", proc);
		return;
	}

	/*
	 * No point in transmitting unless state
	 * is ONLINE.
	 */
	if (idn.state != IDNGS_ONLINE)
		goto retry;

	conset = idn.domset.ds_connected;

	/*
	 * Try and reclaim some buffers if possible.
	 */
	for (d = 0; d < MAX_DOMAINS; d++) {
		if (!DOMAIN_IN_SET(conset, d))
			continue;

		/* Best-effort only; skip domains whose lock is busy. */
		if (!IDN_DLOCK_TRY_SHARED(d))
			continue;

		if (idn_domain[d].dcpu != IDN_NIL_DCPU)
			(void) idn_reclaim_mboxdata(d, 0, -1);

		IDN_DUNLOCK(d);
	}

	/*
	 * Now check if we were successful in getting
	 * any buffers.
	 */
	DSLAB_LOCK_SHARED(idn.localid);
	sp = idn_domain[idn.localid].dslab;
	for (; sp; sp = sp->sl_next)
		if (sp->sl_free)
			break;
	DSLAB_UNLOCK(idn.localid);

	/*
	 * If there are no buffers available,
	 * no point in reenabling the queues.
	 */
	if (sp == NULL)
		goto retry;

	CHANSET_ZERO(wake_set);
	for (c = 0; c < IDN_MAX_NETS; c++) {
		int		pending_bits;
		struct idn	*sip;

		if (!CHAN_IN_SET(idn.xmit_chanset_wanted, c))
			continue;

		csp = &idn.chan_servers[c];
		/* Best-effort: skip channels whose global lock is busy. */
		if (!IDN_CHAN_TRYLOCK_GLOBAL(csp))
			continue;

		pending_bits = csp->ch_state & IDN_CHANSVC_PENDING_BITS;

		sip = IDN_INST2SIP(c);

		/*
		 * Only wake the channel if nobody is asking its send
		 * side to check-in, all of its pending bits are set,
		 * and the corresponding stream instance is running.
		 */
		if (!csp->ch_send.c_checkin &&
		    (pending_bits == IDN_CHANSVC_PENDING_BITS) &&
		    sip && (sip->si_flags & IDNRUNNING)) {

			IDN_CHAN_UNLOCK_GLOBAL(csp);
			CHANSET_ADD(wake_set, c);

			PR_XMON("%s: QENABLE for channel %d\n", proc, c);

			rw_enter(&idn.struprwlock, RW_READER);
			mutex_enter(&idn.sipwenlock);
			idndl_wenable(sip);
			mutex_exit(&idn.sipwenlock);
			rw_exit(&idn.struprwlock);
		} else {
			IDN_CHAN_UNLOCK_GLOBAL(csp);
		}
	}

	/*
	 * Clear the channels we enabled.
	 */
	idn.xmit_chanset_wanted &= ~wake_set;

retry:

	/*
	 * Reschedule ourselves if any wanted channels remain;
	 * otherwise clear the timeout id so the next kickoff
	 * starts a fresh monitor.
	 */
	if (idn.xmit_chanset_wanted == 0)
		idn.xmit_tid = NULL;
	else
		idn.xmit_tid = timeout(idn_xmit_monitor, NULL,
		    idn_xmit_monitor_freq);

	mutex_exit(&idn.xmit_lock);
}
12923 
12924 void
12925 idn_xmit_monitor_kickoff(int chan_wanted)
12926 {
12927 	procname_t	proc = "idn_xmit_monitor_kickoff";
12928 
12929 	mutex_enter(&idn.xmit_lock);
12930 
12931 	if (chan_wanted < 0) {
12932 		/*
12933 		 * Wants all channels.
12934 		 */
12935 		idn.xmit_chanset_wanted = CHANSET_ALL;
12936 	} else {
12937 		CHANSET_ADD(idn.xmit_chanset_wanted, chan_wanted);
12938 	}
12939 
12940 	if (idn.xmit_tid != (timeout_id_t)NULL) {
12941 		/*
12942 		 * A monitor is already running, so
12943 		 * he will catch the new "wants" when
12944 		 * he comes around.
12945 		 */
12946 		mutex_exit(&idn.xmit_lock);
12947 		return;
12948 	}
12949 
12950 	PR_XMON("%s: xmit_mon kicked OFF (chanset = 0x%x)\n",
12951 	    proc, idn.xmit_chanset_wanted);
12952 
12953 	idn.xmit_tid = timeout(idn_xmit_monitor, NULL, idn_xmit_monitor_freq);
12954 
12955 	mutex_exit(&idn.xmit_lock);
12956 }
12957