xref: /titanic_44/usr/src/uts/sun4u/starfire/io/idn_proto.c (revision c1374a13e412c4ec42cba867e57347a0e049a822)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Inter-Domain Network
29  *
30  * IDN Protocol functions to support domain link/unlink/reconfig.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/param.h>
35 #include <sys/machparam.h>
36 #include <sys/debug.h>
37 #include <sys/cpuvar.h>
38 #include <sys/kmem.h>
39 #include <sys/mutex.h>
40 #include <sys/rwlock.h>
41 #include <sys/systm.h>
42 #include <sys/stream.h>
43 #include <sys/strsun.h>
44 #include <sys/stropts.h>
45 #include <sys/sema_impl.h>
46 #include <sys/membar.h>
47 #include <sys/utsname.h>
48 #include <inet/common.h>
49 #include <inet/mi.h>
50 #include <netinet/ip6.h>
51 #include <inet/ip.h>
52 #include <netinet/in.h>
53 #include <sys/vm_machparam.h>
54 #include <sys/x_call.h>
55 #include <sys/ddi.h>
56 #include <sys/sunddi.h>
57 #include <sys/atomic.h>
58 #include <vm/as.h>		/* kas decl */
59 
60 #include <sys/idn.h>
61 #include <sys/idn_xf.h>
62 
63 #define	IDNBUG_CPUPERBOARD
64 
65 extern pri_t		maxclsyspri;
66 extern u_longlong_t	gettick();
67 
68 clock_t	idn_xmit_monitor_freq = 50;
69 
70 static int	idn_connect(int domid);
71 static int	idn_disconnect(int domid, idn_fin_t fintype,
72 				idn_finarg_t finarg, idn_finsync_t finsync);
73 static void	idn_deconfig(int domid);
74 static void	idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
75 				idn_finarg_t finarg, idn_finopt_t finopt,
76 				boardset_t idnset);
77 static void	idn_retry_execute(void *arg);
78 static void	idn_retry_submit(void (*func)(uint_t token, void *arg),
79 				void *arg, uint_t token, clock_t ticks);
80 static void	idn_shutdown_datapath(domainset_t domset, int force);
81 static mblk_t	*idn_fill_buffer(caddr_t bufp, int size, mblk_t *mp,
82 				uchar_t **data_rptrp);
83 static ushort_t	idn_cksum(register ushort_t *hdrp, register int count);
84 static int	idn_mark_awol(int domid, clock_t *atime);
85 
86 static void	idn_recv_proto(idn_protomsg_t *hp);
87 static void	idn_send_config(int domid, int phase);
88 static void	idn_recv_config(int domid, idn_msgtype_t *mtp,
89 				idn_xdcargs_t xargs);
90 static int	idn_send_master_config(int domid, int phase);
91 static int	idn_send_slave_config(int domid, int phase);
92 static uint_t	idn_check_master_config(int domid, uint_t *exp, uint_t *act);
93 static uint_t	idn_check_slave_config(int domid, uint_t *exp, uint_t *act);
94 static int	idn_recv_config_done(int domid);
95 static void	idn_nego_cleanup_check(int domid, int new_masterid,
96 				int new_cpuid);
97 static void	idn_recv_cmd(int domid, idn_msgtype_t *mtp,
98 				idn_xdcargs_t xargs);
99 static int	idn_recv_data(int domid, idn_msgtype_t *mtp,
100 				idn_xdcargs_t xargs);
101 static int	idn_send_data_loopback(idn_netaddr_t dst_netaddr,
102 				queue_t *wq, mblk_t *mp);
103 static void	idn_send_dataresp(int domid, idn_nack_t nacktype);
104 static int	idn_send_mboxdata(int domid, struct idn *sip, int channel,
105 				caddr_t bufp);
106 static int	idn_recv_mboxdata(int channel, caddr_t bufp);
107 static int	idn_program_hardware(int domid);
108 static int	idn_deprogram_hardware(int domid);
109 
110 static void	idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp,
111 			idn_cmd_t cmdtype, idn_nack_t nacktype);
112 static void	idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1,
113 				uint_t arg2, uint_t arg3);
114 static void	idn_terminate_cmd(int domid, int serrno);
115 static void	idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls);
116 static void	idn_mainmbox_activate(int domid);
117 static void	idn_mainmbox_deactivate(ushort_t domset);
118 static void	idn_mainmbox_chan_register(int domid,
119 				idn_mainmbox_t *send_mmp,
120 				idn_mainmbox_t *recv_mmp, int channel);
121 static int	idn_mainmbox_chan_unregister(ushort_t domset, int channel);
122 static int	idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp);
123 static void	idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp);
124 static int	idn_activate_channel(idn_chanset_t chanset,
125 				idn_chanop_t chanop);
126 static void	idn_deactivate_channel(idn_chanset_t chanset,
127 				idn_chanop_t chanop);
128 static int	idn_deactivate_channel_services(int channel,
129 				idn_chanop_t chanop);
130 static int	idn_activate_channel_services(int channel);
131 static void	idn_chan_server(idn_chansvr_t **cspp);
132 #if 0
133 static void	idn_chan_flush(idn_chansvr_t *csp);
134 #endif /* 0 */
135 static void	idn_chan_action(int channel, idn_chanaction_t chanaction,
136 				int wait);
137 static void	idn_chan_addmbox(int channel, ushort_t domset);
138 static void	idn_chan_delmbox(int channel, ushort_t domset);
139 static void	idn_submit_chanactivate_job(int channel);
140 static void	idn_exec_chanactivate(void *chn);
141 
142 static void	idn_link_established(void *arg);
143 static void	idn_prealloc_slab(int nslabs);
144 static void	idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp,
145 				uint_t slab_size);
146 static void	idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp,
147 				uint_t slab_offset, uint_t slab_size,
148 				int serrno);
149 static void	idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset,
150 				uint_t slab_size, int serrno);
151 static void	idn_recv_slabreap_req(int domid, idn_msgtype_t *mtp,
152 				int nslabs);
153 static void	idn_recv_slabreap_resp(int domid, int nslabs, int serrno);
154 static void	idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp,
155 				int nslabs, int serrno);
156 static void	idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp,
157 				smr_offset_t slab_offset, uint_t slab_size);
158 static void	idn_recv_slabfree_resp(int domid, uint_t slab_offset,
159 				uint_t slab_size, int serrno);
160 static void	idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp,
161 				uint_t slab_offset, uint_t slab_size,
162 				int serrno);
163 static void	idn_retry_nodename_req(void *arg);
164 static void	idn_send_nodename_req(int domid);
165 static void	idn_send_nodename_resp(int domid, idn_msgtype_t *mtp,
166 				uint_t bufoffset, int serrno);
167 static void	idn_recv_nodename_req(int domid, idn_msgtype_t *mtp,
168 				uint_t bufoffset);
169 static void	idn_recv_nodename_resp(int domid, uint_t bufoffset,
170 				int serrno);
171 
172 static void	idn_protocol_server(int *id);
173 static void	idn_protocol_server_killall();
174 static void	idn_protojob_free(idn_protojob_t *jp);
175 
176 static int	idn_xstate_transfunc(int domid, void *transarg);
177 static int	idn_xphase_transition(int domid, idn_msgtype_t *mtp,
178 				idn_xdcargs_t xargs);
179 static void	idn_sync_enter(int domid, idn_synccmd_t cmd,
180 				domainset_t xset, domainset_t rset,
181 				int (*transfunc)(), void *transarg);
182 static domainset_t
183 		idn_sync_register(int domid, idn_synccmd_t cmd,
184 				domainset_t ready_set, idn_syncreg_t regtype);
185 static void	idn_sync_register_awol(int domid);
186 static int	idn_verify_config_mbox(int domid);
187 static int	idn_select_master(int domid, int rmasterid, int rcpuid);
188 
189 static int	valid_mtu(uint_t mtu);
190 static int	valid_bufsize(uint_t bufsize);
191 static int	valid_slabsize(int slabsize);
192 static int	valid_nwrsize(int nwrsize);
193 
194 static int	idn_master_init();
195 static void	idn_master_deinit();
196 
197 static void	idn_send_acknack(int domid, idn_msgtype_t *mtp,
198 				idn_xdcargs_t xargs);
199 
200 static int	idn_send_nego(int domid, idn_msgtype_t *mtp,
201 				domainset_t conset);
202 static void	idn_retry_nego(uint_t token, void *arg);
203 static int	idn_check_nego(int domid, idn_msgtype_t *mtp,
204 				idn_xdcargs_t xargs);
205 static void	idn_action_nego_pend(int domid, idn_msgtype_t *mtp,
206 				idn_xdcargs_t xargs);
207 static void	idn_error_nego(int domid, idn_msgtype_t *mtp,
208 				idn_xdcargs_t xargs);
209 static void	idn_action_nego_sent(int domid, idn_msgtype_t *mtp,
210 				idn_xdcargs_t xargs);
211 static void	idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp,
212 				idn_xdcargs_t xargs);
213 static void	idn_final_nego(int domid);
214 static void	idn_exit_nego(int domid, uint_t msgtype);
215 
216 static int	idn_send_con(int domid, idn_msgtype_t *mtp,
217 				idn_con_t contype, domainset_t conset);
218 static void	idn_retry_con(uint_t token, void *arg);
219 static int	idn_check_con(int domid, idn_msgtype_t *mtp,
220 				idn_xdcargs_t xargs);
221 static void	idn_action_con_pend(int domid, idn_msgtype_t *mtp,
222 				idn_xdcargs_t xargs);
223 static void	idn_error_con(int domid, idn_msgtype_t *mtp,
224 				idn_xdcargs_t xargs);
225 static void	idn_action_con_sent(int domid, idn_msgtype_t *mtp,
226 				idn_xdcargs_t xargs);
227 static void	idn_action_con_rcvd(int domid, idn_msgtype_t *mtp,
228 				idn_xdcargs_t xargs);
229 static void	idn_final_con(int domid);
230 static void	idn_exit_con(int domid, uint_t msgtype);
231 
232 static int	idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype,
233 				idn_finarg_t finarg, idn_finopt_t finopt,
234 				domainset_t finset, uint_t finmaster);
235 static void	idn_retry_fin(uint_t token, void *arg);
236 static int	idn_check_fin_pend(int domid, idn_msgtype_t *mtp,
237 				idn_xdcargs_t xargs);
238 static void	idn_action_fin_pend(int domid, idn_msgtype_t *mtp,
239 				idn_xdcargs_t xargs);
240 static void	idn_error_fin_pend(int domid, idn_msgtype_t *mtp,
241 				idn_xdcargs_t xargs);
242 static int	idn_check_fin_sent(int domid, idn_msgtype_t *mtp,
243 				idn_xdcargs_t xargs);
244 static void	idn_action_fin_sent(int domid, idn_msgtype_t *mtp,
245 				idn_xdcargs_t xargs);
246 static void	idn_error_fin_sent(int domid, idn_msgtype_t *mtp,
247 				idn_xdcargs_t xargs);
248 static void	idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp,
249 				idn_xdcargs_t xargs);
250 static void	idn_final_fin(int domid);
251 static void	idn_exit_fin(int domid, uint_t msgtype);
252 
253 /*
254  * We keep a small cache of protojob structures just
255  * in case allocation within idn_handler comes back
256  * with nothing from the land of kmem.
257  */
258 idn_protojob_t	idn_protojob_cache[IDN_DMV_PENDING_MAX];
259 idn_protojob_t	*idn_protojob_cache_list;
260 kmutex_t	idn_protojob_cache_lock;
261 
262 /*
263  *	- receive message.
264  *	- call check-function for current state.
265  *	- if (check-function == ok) then
266  *		call action-function for current state.
267  *	  else
268  *		call error-function for current state.
269  *	- transition state based on check results.
270  *	- if (next state == final state) then
271  *		call final-function.
272  */
273 static idn_xphase_t xphase_nego = {
274 	IDNP_NEGO,
275 	{
276 		{ IDNDS_NEGO_PEND,
277 			idn_check_nego,
278 			idn_action_nego_pend,
279 			idn_error_nego},
280 		{ IDNDS_NEGO_SENT,
281 			idn_check_nego,
282 			idn_action_nego_sent,
283 			idn_error_nego},
284 		{ IDNDS_NEGO_RCVD,
285 			NULL,
286 			idn_action_nego_rcvd,
287 			NULL },
288 		{ IDNDS_CONFIG, NULL, NULL, NULL },
289 	},
290 	idn_final_nego,
291 	idn_exit_nego
292 };
293 
294 static idn_xphase_t xphase_con = {
295 	IDNP_CON,
296 	{
297 		{ IDNDS_CON_PEND,
298 			idn_check_con,
299 			idn_action_con_pend,
300 			idn_error_con},
301 		{ IDNDS_CON_SENT,
302 			idn_check_con,
303 			idn_action_con_sent,
304 			idn_error_con},
305 		{ IDNDS_CON_RCVD,
306 			NULL,
307 			idn_action_con_rcvd,
308 			NULL },
309 		{ IDNDS_CON_READY, NULL, NULL, NULL },
310 	},
311 	idn_final_con,
312 	idn_exit_con
313 };
314 
315 static idn_xphase_t xphase_fin = {
316 	IDNP_FIN,
317 	{
318 		{ IDNDS_FIN_PEND,
319 			idn_check_fin_pend,
320 			idn_action_fin_pend,
321 			idn_error_fin_pend },
322 		{ IDNDS_FIN_SENT,
323 			idn_check_fin_sent,
324 			idn_action_fin_sent,
325 			idn_error_fin_sent },
326 		{ IDNDS_FIN_RCVD,
327 			NULL,
328 			idn_action_fin_rcvd,
329 			NULL },
330 		{ IDNDS_DMAP, NULL, NULL, NULL },
331 	},
332 	idn_final_fin,
333 	idn_exit_fin
334 };
335 
336 static int idnxs_state_table[4][5][2] = {
337 	{			/* IDNXS_PEND */
338 		{ IDNXS_SENT,	IDNXS_PEND },	/* 0 */
339 		{ IDNXS_RCVD,	IDNXS_PEND },	/* msg */
340 		{ IDNXS_NIL,	IDNXS_PEND },	/* msg+ack */
341 		{ IDNXS_PEND,	IDNXS_NIL },	/* ack */
342 		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
343 	},
344 	{			/* IDNXS_SENT */
345 		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
346 		{ IDNXS_RCVD,	IDNXS_PEND },	/* msg */
347 		{ IDNXS_FINAL,	IDNXS_PEND },	/* msg+ack */
348 		{ IDNXS_NIL,	IDNXS_NIL },	/* ack */
349 		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
350 	},
351 	{			/* IDNXS_RCVD */
352 		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
353 		{ IDNXS_NIL,	IDNXS_NIL },	/* msg */
354 		{ IDNXS_FINAL,	IDNXS_NIL },	/* msg+ack */
355 		{ IDNXS_FINAL,	IDNXS_NIL },	/* ack */
356 		{ IDNXS_PEND,	IDNXS_NIL },	/* nack */
357 	},
358 	{			/* IDNXS_FINAL */
359 		{ IDNXS_NIL,	IDNXS_NIL },	/* 0 */
360 		{ IDNXS_NIL,	IDNXS_NIL },	/* msg */
361 		{ IDNXS_NIL,	IDNXS_NIL },	/* msg+ack */
362 		{ IDNXS_NIL,	IDNXS_NIL },	/* ack */
363 		{ IDNXS_NIL,	IDNXS_NIL },	/* nack */
364 	}
365 };
366 
367 /*
368  * NONE		Respective domain does not have a master.
369  * OTHER	Respective domain has a master different
370  *		than either local or remote.
371  * LOCAL	Respective domain has chosen local as master.
372  * REMOTE	Respective domain has chosen remote as master.
373  *
374  * Actions:
375  *	VOTE		Compare votes and select one.
376  *	VOTE_RCFG	Compare votes and Reconfigure
377  *			if necessary, i.e. remote won.
378  *	CONNECT		Connect to remote's OTHER if different
379  *			than our local master.
380  *	LOCAL		Local domain is winner.
381  *	REMOTE		Remote domain is winner.
382  *	WAIT		Wait for remote to connect to our
383  *			master if his is different.
384  *	ERROR		An impossible condition.
385  *
386  * Index:
387  *	0 = Local
388  *	1 = Remote
389  */
390 static idn_master_select_t master_select_table[4][4] = {
391 	{				/* local	remote	*/
392 		MASTER_SELECT_VOTE,	/* NONE		NONE	*/
393 		MASTER_SELECT_CONNECT,	/* NONE		OTHER	*/
394 		MASTER_SELECT_LOCAL,	/* NONE		LOCAL	*/
395 		MASTER_SELECT_REMOTE	/* NONE		REMOTE	*/
396 	},
397 	{
398 		MASTER_SELECT_WAIT,	/* OTHER	NONE	*/
399 		MASTER_SELECT_CONNECT,	/* OTHER	OTHER	*/
400 		MASTER_SELECT_WAIT,	/* OTHER	LOCAL	*/
401 		MASTER_SELECT_WAIT	/* OTHER	REMOTE	*/
402 	},
403 	{
404 		MASTER_SELECT_LOCAL,	/* LOCAL	NONE	*/
405 		MASTER_SELECT_CONNECT,	/* LOCAL	OTHER	*/
406 		MASTER_SELECT_LOCAL,	/* LOCAL	LOCAL	*/
407 		MASTER_SELECT_VOTE_RCFG	/* LOCAL	REMOTE	*/
408 	},
409 	{
410 		MASTER_SELECT_REMOTE,	/* REMOTE	NONE	*/
411 		MASTER_SELECT_CONNECT,	/* REMOTE	OTHER	*/
412 		MASTER_SELECT_ERROR,	/* REMOTE	LOCAL	*/
413 		MASTER_SELECT_REMOTE	/* REMOTE	REMOTE	*/
414 	}
415 };
416 
417 void
idn_assign_cookie(int domid)418 idn_assign_cookie(int domid)
419 {
420 	static ushort_t	num = 0;
421 	ushort_t	cookie;
422 	procname_t	proc = "idn_assign_cookie";
423 
424 	if ((cookie = idn_domain[domid].dcookie_recv) != 0)
425 		return;
426 
427 	cookie = (ushort_t)(((uint64_t)&idn_domain[domid] >> 8) & 0xff);
428 	while ((cookie ^= num++ & 0xff) == 0)
429 		;
430 
431 	PR_PROTO("%s:%d: assigned RECV cookie 0x%x\n", proc, domid, cookie);
432 
433 	idn_domain[domid].dcookie_recv = cookie;
434 }
435 
436 void
idn_update_priority(int domid,int pri)437 idn_update_priority(int domid, int pri)
438 {
439 	idn_domain_t	*dp;
440 	procname_t	proc = "idn_update_priority";
441 
442 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
443 
444 	dp = &idn_domain[domid];
445 
446 	if (pri >= IDNVOTE_MINPRI) {
447 		dp->dvote.v.priority = pri & IDNVOTE_PRI_MASK;
448 
449 		PR_PROTO("%s:%d: SETTING PRIORITY to req(%d) "
450 		    "(localpri = 0x%x)\n",
451 		    proc, domid, pri, IDNVOTE_PRIVALUE(dp->dvote));
452 	} else {
453 		PR_PROTO("%s:%d: PRIORITIES UNCHANGED (pri = 0x%x)\n",
454 		    proc, domid, IDNVOTE_PRIVALUE(dp->dvote));
455 	}
456 }
457 
458 /*
459  * Initiate a link between the local domain and the remote domain
460  * containing the given cpuid.
461  */
462 int
idn_link(int domid,int cpuid,int pri,int waittime,idnsb_error_t * sep)463 idn_link(int domid, int cpuid, int pri, int waittime, idnsb_error_t *sep)
464 {
465 	int		rv;
466 	idn_domain_t	*dp;
467 	void		*opcookie;
468 	procname_t	proc = "idn_link";
469 
470 	if ((cpuid < 0) || (cpuid >= NCPU)) {
471 		cmn_err(CE_WARN,
472 		    "IDN: 201: (LINK) invalid CPU ID (%d)", cpuid);
473 		return (EINVAL);
474 	}
475 	if (waittime < 0) {
476 		cmn_err(CE_WARN,
477 		    "IDN: 202: (LINK) invalid time-out value (%d)",
478 		    waittime);
479 		return (EINVAL);
480 	}
481 	if (!VALID_DOMAINID(domid)) {
482 		cmn_err(CE_WARN,
483 		    "IDN: 203: (LINK) invalid domain ID (%d)",
484 		    domid);
485 		return (EINVAL);
486 	}
487 	if (domid == idn.localid)
488 		return (0);
489 
490 	IDN_SYNC_LOCK();
491 	IDN_DLOCK_EXCL(domid);
492 
493 	dp = &idn_domain[domid];
494 
495 	switch (dp->dstate) {
496 	case IDNDS_CLOSED:
497 		break;
498 
499 	case IDNDS_CONNECTED:
500 #ifdef DEBUG
501 		cmn_err(CE_NOTE,
502 		    "!IDN: domain %d (CPU ID %d) already connected",
503 		    domid, cpuid);
504 #endif /* DEBUG */
505 		IDN_DUNLOCK(domid);
506 		IDN_SYNC_UNLOCK();
507 		return (0);
508 
509 	default:
510 		cmn_err(CE_WARN,
511 		    "IDN: 204: domain %d state (%s) inappropriate",
512 		    domid, idnds_str[dp->dstate]);
513 		IDN_DUNLOCK(domid);
514 		IDN_SYNC_UNLOCK();
515 		return (EINVAL);
516 	}
517 
518 	rv = idn_open_domain(domid, cpuid, 0);
519 	if (rv != 0) {
520 		cmn_err(CE_WARN,
521 		    "IDN: 205: (%s) failed to open-domain(%d,%d)",
522 		    proc, domid, cpuid);
523 		IDN_DUNLOCK(domid);
524 		IDN_SYNC_UNLOCK();
525 		return (EIO);
526 	}
527 
528 
529 	IDN_DLOCK_EXCL(idn.localid);
530 	idn_update_priority(idn.localid, pri);
531 	IDN_DUNLOCK(idn.localid);
532 
533 	if (waittime > 0)
534 		opcookie = idn_init_op(IDNOP_CONNECTED, DOMAINSET(domid), sep);
535 
536 	(void) idn_connect(domid);
537 
538 	IDN_DUNLOCK(domid);
539 	IDN_SYNC_UNLOCK();
540 
541 	PR_PROTO("%s:%d: ALLOCATED idn_link(%d)\n", proc, domid, cpuid);
542 
543 	if (waittime > 0) {
544 		boardset_t	domset = 0;
545 		/*
546 		 * Well we've successfully allocated a domain id,
547 		 * but the link may not be fully established yet.
548 		 * Need to wait since it happens asynchronously.
549 		 */
550 		PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
551 		    proc, domid, idnop_str[IDNOP_CONNECTED],
552 		    DOMAINSET(domid));
553 
554 		rv = idn_wait_op(opcookie, &domset, waittime);
555 	}
556 
557 #ifdef DEBUG
558 	if (rv == 0) {
559 		if (waittime > 0) {
560 			PR_PROTO("%s:%d: connect SUCCEEDED (cpu %d)\n",
561 			    proc, domid, cpuid);
562 		} else {
563 			PR_PROTO("%s:%d: connect KICKED OFF (cpu %d)\n",
564 			    proc, domid, cpuid);
565 		}
566 	} else {
567 		PR_PROTO("%s:%d: connect FAILED (cpu %d)\n",
568 		    proc, domid, cpuid);
569 	}
570 #endif /* DEBUG */
571 
572 	return (rv);
573 }
574 
575 /*
576  * Unlink the given domain from any domain cluster of
577  * which it might be a member.  Force indicates that domain
578  * should not go AWOL and if it's currently AWOL to close
579  * and remove it.
580  * IMPORTANT: If the (hard) force flag is set, the caller is
581  *	      assumed to GUARANTEE that the given domain will
582  *	      not attempt to communicate with the local domain
583  *	      in any manner.
584  */
585 int
idn_unlink(int domid,boardset_t idnset,idn_fin_t fintype,idn_finopt_t finopt,int waittime,idnsb_error_t * sep)586 idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
587 		idn_finopt_t finopt, int waittime, idnsb_error_t *sep)
588 {
589 	int		rv = 0;
590 	domainset_t	domset;
591 	void		*opcookie;
592 	procname_t	proc = "idn_unlink";
593 
594 
595 	if (waittime < 0) {
596 		cmn_err(CE_WARN,
597 		    "IDN: 202: (UNLINK) invalid time-out value (%d)",
598 		    waittime);
599 		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_WTIME);
600 		SET_IDNKERR_PARAM0(sep, waittime);
601 		return (EINVAL);
602 	}
603 	if (!VALID_DOMAINID(domid)) {
604 		cmn_err(CE_WARN,
605 		    "IDN: 203: (UNLINK) invalid domain ID (%d)",
606 		    domid);
607 		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_DOMAIN);
608 		SET_IDNKERR_PARAM0(sep, domid);
609 		SET_IDNKERR_PARAM1(sep, -1);
610 		return (EINVAL);
611 	}
612 	if (idn.localid == IDN_NIL_DOMID) {
613 #ifdef DEBUG
614 		cmn_err(CE_NOTE,
615 		    "!IDN: %s: local domain not connected to an IDNnet",
616 		    proc);
617 #endif /* DEBUG */
618 		return (0);
619 	}
620 
621 	/*
622 	 * Lock ordering protocols requires that we grab the
623 	 * global lock _before_ the local domain's lock.
624 	 * However, non-local domains must have their lock
625 	 * grabbed _before_ the global lock.
626 	 */
627 	IDN_SYNC_LOCK();
628 	IDN_GLOCK_EXCL();
629 	domset = idn.domset.ds_trans_on | idn.domset.ds_trans_off;
630 	if ((idn.state == IDNGS_OFFLINE) && !domset) {
631 #ifdef DEBUG
632 		cmn_err(CE_WARN,
633 		    "!IDN: %s: local domain not connected to an IDNnet",
634 		    proc);
635 #endif /* DEBUG */
636 		IDN_GUNLOCK();
637 		IDN_SYNC_UNLOCK();
638 		return (0);
639 	}
640 
641 	if ((domid == IDN_NIL_DOMID) || (domid == idn.localid)) {
642 		domid = idn.localid;
643 		IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
644 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
645 		domset = DOMAINSET_ALL;
646 		DOMAINSET_DEL(domset, idn.localid);
647 	} else {
648 		domset = DOMAINSET(domid);
649 	}
650 	IDN_GUNLOCK();
651 
652 	if (waittime > 0)
653 		opcookie = idn_init_op(IDNOP_DISCONNECTED, domset, sep);
654 
655 	idn_unlink_domainset(domset, fintype, IDNFIN_ARG_NONE, finopt, idnset);
656 
657 	IDN_SYNC_UNLOCK();
658 
659 	if (waittime > 0) {
660 		/*
661 		 * Well the unlink has successfully kicked off.
662 		 * Since process is asynchronous we need to wait
663 		 * for it to complete.
664 		 */
665 		PR_PROTO("%s:%d: WAITING for op(%s) for (domset 0%x)...\n",
666 		    proc, domid, idnop_str[IDNOP_DISCONNECTED],
667 		    domset);
668 
669 		rv = idn_wait_op(opcookie, &domset, waittime);
670 	}
671 
672 	if (rv == 0) {
673 		if (waittime > 0) {
674 			PR_PROTO("%s:%d: disconnect SUCCEEDED\n",
675 			    proc, domid);
676 		} else {
677 			PR_PROTO("%s:%d: disconnect KICKED OFF\n",
678 			    proc, domid);
679 		}
680 	} else {
681 		PR_PROTO("%s:%d: disconnect FAILED\n", proc, domid);
682 	}
683 
684 	return (rv);
685 }
686 
687 static void
idn_unlink_domainset(domainset_t domset,idn_fin_t fintype,idn_finarg_t finarg,idn_finopt_t finopt,boardset_t idnset)688 idn_unlink_domainset(domainset_t domset, idn_fin_t fintype,
689 			idn_finarg_t finarg, idn_finopt_t finopt,
690 			boardset_t idnset)
691 {
692 	int		d;
693 	domainset_t	offset;
694 	procname_t	proc = "idn_unlink_domainset";
695 
696 	ASSERT(IDN_SYNC_IS_LOCKED());
697 
698 	/*
699 	 * Determine subset for which we have
700 	 * no active connections.
701 	 */
702 	offset = domset & ~(idn.domset.ds_trans_on |
703 	    idn.domset.ds_connected |
704 	    idn.domset.ds_trans_off |
705 	    idn.domset.ds_relink);
706 	/*
707 	 * Determine subset that are really candidates.
708 	 * Note that we include those already down the path
709 	 * since it's possible a request came in to upgrade
710 	 * their fintype (e.g. NORMAL->FORCE_SOFT).
711 	 */
712 	domset &= ~offset;
713 
714 	if (offset)
715 		idn_update_op(IDNOP_DISCONNECTED, offset, NULL);
716 
717 	IDN_GLOCK_EXCL();
718 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
719 		/*
720 		 * Don't add domains already transitioning off.
721 		 * If they caught on an earlier Reconfig wave then
722 		 * they'll already be in ds_relink anyway.  Otherwise,
723 		 * once a domain is transition off we can't upgrade
724 		 * him to a RELINK.
725 		 */
726 #ifdef DEBUG
727 		if (idn.domset.ds_hitlist & domset) {
728 			PR_HITLIST("%s: domset=%x, hitlist=%x, trans_off=%x "
729 			    "-> relink = %x -> %x\n",
730 			    proc, domset, idn.domset.ds_hitlist,
731 			    idn.domset.ds_relink, idn.domset.ds_trans_off,
732 			    idn.domset.ds_relink |
733 			    (domset & ~idn.domset.ds_trans_off));
734 		}
735 #endif /* DEBUG */
736 
737 		domset &= ~idn.domset.ds_trans_off;
738 		idn.domset.ds_relink |= domset;
739 	} else {
740 		idn.domset.ds_relink &= ~domset;
741 	}
742 	/*
743 	 * Update the ds_trans_on/off so we don't waste
744 	 * time talking to these folks.
745 	 */
746 	idn.domset.ds_trans_on  &= ~domset;
747 	idn.domset.ds_trans_off |= domset;
748 
749 	if (domset == 0) {
750 		if ((idn.domset.ds_trans_on |
751 		    idn.domset.ds_connected |
752 		    idn.domset.ds_trans_off |
753 		    idn.domset.ds_relink) == 0) {
754 			PR_HITLIST("%s:%x: HITLIST %x -> 0\n",
755 			    proc, domset, idn.domset.ds_hitlist);
756 			idn.domset.ds_hitlist = 0;
757 			IDN_GSTATE_TRANSITION(IDNGS_OFFLINE);
758 		}
759 		IDN_GUNLOCK();
760 		return;
761 	}
762 	IDN_GUNLOCK();
763 
764 	for (d = 0; d < MAX_DOMAINS; d++) {
765 		idn_domain_t	*dp;
766 		idn_fin_t	ftype;
767 
768 		if (!DOMAIN_IN_SET(domset, d))
769 			continue;
770 
771 		dp = &idn_domain[d];
772 		IDN_DLOCK_EXCL(d);
773 		IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
774 		    idn.domset.ds_relink);
775 		ftype = fintype;
776 		if ((dp->dcpu != IDN_NIL_DCPU) && dp->dhw.dh_boardset) {
777 			/*
778 			 * If domain is not in the IDNSET passed
779 			 * down then we need to upgrade this to
780 			 * hard-force in order to prevent possible
781 			 * system failures (arbstop).  This is simply
782 			 * extra protection beyond that checked by
783 			 * the SSP.  IDNSET contains the set of boards
784 			 * that have a "link" to the local domain,
785 			 * including the SMD regs.
786 			 */
787 			if ((idnset & dp->dhw.dh_boardset) == 0) {
788 				PR_PROTO("%s:%d: boardset 0x%x "
789 				    "NOT in IDNSET 0x%x\n",
790 				    proc, d, dp->dhw.dh_boardset,
791 				    idnset);
792 				if (ftype != IDNFIN_FORCE_HARD)
793 					cmn_err(CE_NOTE,
794 					    "!IDN: 222: no IDN linkage "
795 					    "found (b=0x%x, i=0x%x) "
796 					    "upgrading unlink %s to %s",
797 					    dp->dhw.dh_boardset,
798 					    idnset, idnfin_str[ftype],
799 					    idnfin_str[IDNFIN_FORCE_HARD]);
800 
801 				ftype = IDNFIN_FORCE_HARD;
802 			} else {
803 				PR_PROTO("%s:%d: boardset 0x%x "
804 				    "FOUND in IDNSET 0x%x\n",
805 				    proc, d, dp->dhw.dh_boardset,
806 				    idnset);
807 			}
808 		}
809 		(void) idn_disconnect(d, ftype, finarg, IDNDS_SYNC_TYPE(dp));
810 		IDN_DUNLOCK(d);
811 	}
812 }
813 
814 /*
815  * Return w/locks held.
816  */
817 static int
idn_connect(int domid)818 idn_connect(int domid)
819 {
820 	idn_xdcargs_t	xargs;
821 	idn_domain_t	*dp = &idn_domain[domid];
822 	procname_t	proc = "idn_connect";
823 
824 	ASSERT(IDN_SYNC_IS_LOCKED());
825 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
826 
827 	ASSERT(dp->dcpu != IDN_NIL_DCPU);
828 
829 	if (dp->dstate != IDNDS_CLOSED) {
830 		if (DOMAIN_IN_SET(idn.domset.ds_trans_on |
831 		    idn.domset.ds_connected, domid)) {
832 			PR_PROTO("%s:%d: already connected or "
833 			    "in-progress\n", proc, domid);
834 		} else {
835 			PR_PROTO("%s:%d: current state (%s) != "
836 			    "CLOSED\n", proc, domid,
837 			    idnds_str[dp->dstate]);
838 		}
839 		return (-1);
840 	}
841 
842 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_connected, domid));
843 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_trans_off, domid));
844 
845 	dp->dxp = &xphase_nego;
846 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
847 
848 	(void) idn_xphase_transition(domid, NULL, xargs);
849 
850 	return (0);
851 }
852 
853 /*
854  * Return w/locks held.
855  */
856 static int
idn_disconnect(int domid,idn_fin_t fintype,idn_finarg_t finarg,idn_finsync_t finsync)857 idn_disconnect(int domid, idn_fin_t fintype, idn_finarg_t finarg,
858     idn_finsync_t finsync)
859 {
860 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
861 	uint_t		token;
862 	uint_t		finmaster;
863 	idn_xdcargs_t	xargs;
864 	idn_finopt_t	finopt;
865 	idn_domain_t	*dp = &idn_domain[domid];
866 	procname_t	proc = "idn_disconnect";
867 
868 	ASSERT(IDN_SYNC_IS_LOCKED());
869 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
870 
871 	if (dp->dstate == IDNDS_CLOSED) {
872 		PR_PROTO("%s:%d: already CLOSED\n", proc, domid);
873 		idn_update_op(IDNOP_DISCONNECTED, DOMAINSET(domid), NULL);
874 		return (-1);
875 	}
876 
877 	/*
878 	 * Terminate any outstanding commands that were
879 	 * targeted towards this domain.
880 	 */
881 	idn_terminate_cmd(domid, ECANCELED);
882 
883 	/*
884 	 * Terminate any and all retries that may have
885 	 * outstanding for this domain.
886 	 */
887 	token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
888 	(void) idn_retry_terminate(token);
889 
890 	/*
891 	 * Stop all outstanding message timers for
892 	 * this guy.
893 	 */
894 	IDN_MSGTIMER_STOP(domid, 0, 0);
895 
896 	dp->dxp = &xphase_fin;
897 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
898 	if ((int)dp->dfin < (int)fintype) {
899 		/*
900 		 * You can only upgrade a fin type.
901 		 * We don't allow it to be downgraded
902 		 * as it's too dangerous since some
903 		 * state may have been blown away while
904 		 * we were fin'ing at a higher level.
905 		 */
906 		IDN_FSTATE_TRANSITION(dp, fintype);
907 	}
908 
909 	dp->dfin_sync = finsync;
910 	PR_PROTO("%s:%d: disconnect synchronously = %s\n",
911 	    proc, domid, (finsync == IDNFIN_SYNC_OFF) ? "OFF" :
912 	    (finsync == IDNFIN_SYNC_NO) ? "NO" : "YES");
913 
914 	IDN_GLOCK_SHARED();
915 	if (DOMAIN_IN_SET(idn.domset.ds_relink, domid) &&
916 	    (idn.state != IDNGS_DISCONNECT)) {
917 		finopt = IDNFIN_OPT_RELINK;
918 	} else {
919 		finopt = IDNFIN_OPT_UNLINK;
920 		PR_HITLIST("%s:%d: HITLIST %x -> %x\n",
921 		    proc, domid, idn.domset.ds_hitlist,
922 		    idn.domset.ds_hitlist | DOMAINSET(domid));
923 		DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
924 	}
925 
926 	CLR_XARGS(xargs);
927 	SET_XARGS_FIN_TYPE(xargs, dp->dfin);
928 	SET_XARGS_FIN_ARG(xargs, finarg);
929 	SET_XARGS_FIN_OPT(xargs, finopt);
930 	SET_XARGS_FIN_DOMSET(xargs, 0);		/* unused when msg = 0 */
931 	new_masterid = IDN_GET_NEW_MASTERID();
932 	IDN_GUNLOCK();
933 	if (new_masterid != IDN_NIL_DOMID)
934 		new_cpuid = idn_domain[new_masterid].dcpu;
935 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
936 	SET_XARGS_FIN_MASTER(xargs, finmaster);
937 
938 	(void) idn_xphase_transition(domid, NULL, xargs);
939 
940 	return (0);
941 }
942 
943 static int
idn_next_xstate(idn_xstate_t o_xstate,int err,uint_t msg)944 idn_next_xstate(idn_xstate_t o_xstate, int err, uint_t msg)
945 {
946 	int		index;
947 	procname_t	proc = "idn_next_xstate";
948 
949 	ASSERT(((int)o_xstate >= 0) && ((int)o_xstate <= 4));
950 
951 	if (!msg)
952 		index = 0;
953 	else if ((msg & IDNP_MSGTYPE_MASK) == 0)
954 		index = (msg & IDNP_ACK) ? 3 : (msg & IDNP_NACK) ? 4 : -1;
955 	else
956 		index = (msg & IDNP_ACK) ? 2 :
957 		    !(msg & IDNP_ACKNACK_MASK) ? 1 : -1;
958 
959 	if (index == -1) {
960 		STRING(str);
961 
962 		INUM2STR(msg, str);
963 		PR_PROTO("%s: (msg = 0x%x(%s))\n", proc, msg, str);
964 		return (IDNXS_NIL);
965 	}
966 
967 	if (err == -1) {
968 		int	n_xstate;
969 		/*
970 		 * Caller is just interested in querying is this
971 		 * is a valid message to receive in the current
972 		 * xstate.  A return value of IDNXS_NIL indicates
973 		 * that it's not.  A return value of non-IDNXS_NIL
974 		 * indicates it's cool.  An invalid message is
975 		 * determined by both err & !err states being IDNXS_NIL.
976 		 */
977 		n_xstate = idnxs_state_table[(int)o_xstate][index][0];
978 		if (n_xstate != IDNXS_NIL)
979 			return (n_xstate);
980 		else
981 			return (idnxs_state_table[(int)o_xstate][index][1]);
982 	} else {
983 		return (idnxs_state_table[(int)o_xstate][index][err ? 1 : 0]);
984 	}
985 }
986 
987 static int
idn_select_candidate(domainset_t master_set)988 idn_select_candidate(domainset_t master_set)
989 {
990 	int		d, best_id = IDN_NIL_DOMID;
991 	uint_t		best_vote = 0;
992 	idn_domain_t	*dp;
993 	procname_t	proc = "idn_select_candidate";
994 
995 	ASSERT(IDN_SYNC_IS_LOCKED());
996 
997 	if (master_set == 0) {
998 		PR_PROTO("%s: %x -> %d\n", proc, master_set, IDN_NIL_DOMID);
999 		return (IDN_NIL_DOMID);
1000 	}
1001 
1002 	for (d = 0; d < MAX_DOMAINS; d++) {
1003 		uint_t		vote;
1004 		idn_vote_t	v;
1005 
1006 		if (!DOMAIN_IN_SET(master_set, d))
1007 			continue;
1008 
1009 		dp = &idn_domain[d];
1010 
1011 		if ((dp->domid == IDN_NIL_DOMID) ||
1012 		    (dp->dcpu == IDN_NIL_DCPU) ||
1013 		    ((v.ticket = dp->dvote.ticket) == 0))
1014 			continue;
1015 
1016 		vote = IDNVOTE_ELECT(v);
1017 
1018 		if (vote > best_vote) {
1019 			best_vote = vote;
1020 			best_id = d;
1021 		}
1022 	}
1023 
1024 	PR_PROTO("%s: %x -> %d\n", proc, master_set, best_id);
1025 
1026 	return (best_id);
1027 }
1028 
1029 /*
1030  * If a non-zero value is returned then GLOCK will have been dropped.
1031  * Otherwise, routine returns with all incoming locks still held.
1032  */
1033 static int
idn_select_master(int domid,int rmasterid,int rcpuid)1034 idn_select_master(int domid, int rmasterid, int rcpuid)
1035 {
1036 	char		*sel;
1037 	int		lmasterid, masterid;
1038 	int		do_reconfig = 0;
1039 	int		lindex, rindex;
1040 	idn_domain_t	*ldp, *rdp;
1041 	uint_t		rvote, lvote;
1042 	idn_master_select_t	select;
1043 	procname_t	proc = "idn_select_master";
1044 
1045 	ASSERT(IDN_SYNC_IS_LOCKED());
1046 	ASSERT(IDN_GLOCK_IS_EXCL());
1047 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1048 
1049 	PR_PROTO("%s:%d: lmasterid = %d, rmasterid = %d, rcpuid = %d\n",
1050 	    proc, domid, IDN_GET_MASTERID(), rmasterid, rcpuid);
1051 
1052 	IDN_DLOCK_EXCL(idn.localid);
1053 
1054 	ldp = &idn_domain[idn.localid];
1055 	rdp = &idn_domain[domid];
1056 
1057 	/*
1058 	 * Clear master bits since mastership is derived from
1059 	 * other information (local/remote idn.masterid/idn.new_masterid)
1060 	 * and we don't want the vote master bit to confuse matters.
1061 	 */
1062 	lvote = IDNVOTE_ELECT(ldp->dvote);
1063 	rvote = IDNVOTE_ELECT(rdp->dvote);
1064 
1065 	lmasterid = IDN_GET_MASTERID();
1066 
1067 	lindex = (lmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
1068 	    (lmasterid == idn.localid) ? MASTER_IS_LOCAL :
1069 	    (lmasterid == domid) ? MASTER_IS_REMOTE :
1070 	    MASTER_IS_OTHER;
1071 
1072 	rindex = (rmasterid == IDN_NIL_DOMID) ? MASTER_IS_NONE :
1073 	    (rmasterid == domid) ? MASTER_IS_REMOTE :
1074 	    (rmasterid == idn.localid) ? MASTER_IS_LOCAL :
1075 	    MASTER_IS_OTHER;
1076 
1077 	select = master_select_table[lindex][rindex];
1078 
1079 	masterid = IDN_NIL_DOMID;
1080 
1081 	/*
1082 	 * Each case is responsible for dropping DLOCK(localid)
1083 	 * and GLOCK if it doesn't select a master, unless a
1084 	 * reconfig is necessary.
1085 	 */
1086 	switch (select) {
1087 	case MASTER_SELECT_VOTE_RCFG:
1088 		sel = "VOTE_RECONFIG";
1089 		if (lvote > rvote) {
1090 			/*
1091 			 * If the local domain is the winner then remote
1092 			 * domain will have to Reconfig.  We'll continue
1093 			 * through the connection process anyway.  The
1094 			 * remote domains will tell us to back-off while
1095 			 * Reconfigs, but that's okay as we'll keep retrying.
1096 			 */
1097 			masterid = idn.localid;
1098 		} else if (lvote < rvote) {
1099 			do_reconfig = 1;
1100 			/*
1101 			 * GLOCK will get dropped once reconfig
1102 			 * is kicked off.
1103 			 */
1104 		} else {
1105 			cmn_err(CE_WARN,
1106 			    "IDN: 206: cannot link domains "
1107 			    "with equal votes (L(%d),R(%d),0x%x)",
1108 			    idn.localid, domid, rvote);
1109 			IDN_GUNLOCK();
1110 		}
1111 		IDN_DUNLOCK(idn.localid);
1112 		break;
1113 
1114 	case MASTER_SELECT_VOTE:
1115 		sel = "VOTE";
1116 		if (lvote > rvote) {
1117 			masterid = idn.localid;
1118 			ldp->dvote.v.master = 1;
1119 			rdp->dvote.v.master = 0;
1120 		} else if (lvote < rvote) {
1121 			masterid = domid;
1122 			ldp->dvote.v.master = 0;
1123 			rdp->dvote.v.master = 1;
1124 		} else {
1125 			cmn_err(CE_WARN,
1126 			    "IDN: 206: cannot link domains "
1127 			    "with equal votes (L(%d),R(%d),0x%x)",
1128 			    idn.localid, domid, rvote);
1129 		}
1130 		ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
1131 		if (masterid != IDN_NIL_DOMID) {
1132 			IDN_SET_MASTERID(masterid);
1133 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1134 		} else {
1135 			IDN_GUNLOCK();
1136 		}
1137 		IDN_DUNLOCK(idn.localid);
1138 		break;
1139 
1140 	case MASTER_SELECT_REMOTE:
1141 		sel = "REMOTE";
1142 		masterid = domid;
1143 		if (IDN_GET_MASTERID() == IDN_NIL_DOMID) {
1144 			IDN_SET_MASTERID(masterid);
1145 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1146 			ldp->dvote.v.master = 0;
1147 			rdp->dvote.v.master = 1;
1148 		}
1149 		ASSERT(IDN_GET_MASTERID() == domid);
1150 		IDN_DUNLOCK(idn.localid);
1151 		break;
1152 
1153 	case MASTER_SELECT_LOCAL:
1154 		sel = "LOCAL";
1155 		masterid = idn.localid;
1156 		if (IDN_GET_MASTERID() == IDN_NIL_DOMID) {
1157 			IDN_SET_MASTERID(masterid);
1158 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
1159 			ldp->dvote.v.master = 1;
1160 			rdp->dvote.v.master = 0;
1161 		}
1162 		ASSERT(IDN_GET_MASTERID() == idn.localid);
1163 		IDN_DUNLOCK(idn.localid);
1164 		break;
1165 
1166 	case MASTER_SELECT_CONNECT:
1167 		sel = "CONNECT";
1168 		if (rmasterid == lmasterid) {
1169 			/*
1170 			 * Local and remote have same master,
1171 			 * let him come onboard.
1172 			 */
1173 			masterid = lmasterid;
1174 			IDN_DUNLOCK(idn.localid);
1175 
1176 		} else {
1177 			int	rv;
1178 
1179 			IDN_DUNLOCK(idn.localid);
1180 			IDN_GUNLOCK();
1181 			IDN_DLOCK_EXCL(rmasterid);
1182 			PR_PROTO("%s:%d: attempting connect w/remote "
1183 			    "master %d\n",
1184 			    proc, domid, rmasterid);
1185 			rv = idn_open_domain(rmasterid, rcpuid, 0);
1186 			if (rv == 0) {
1187 				(void) idn_connect(rmasterid);
1188 			} else if (rv < 0) {
1189 				cmn_err(CE_WARN,
1190 				    "IDN: 205: (%s) failed to "
1191 				    "open-domain(%d,%d)",
1192 				    proc, rmasterid, rcpuid);
1193 			} else {
1194 				/*
1195 				 * Must already have a connection going.
1196 				 */
1197 				PR_PROTO("%s:%d: failed "
1198 				    "idn_open_domain(%d,%d,0) "
1199 				    "(rv = %d)\n",
1200 				    proc, domid, rmasterid,
1201 				    rcpuid, rv);
1202 			}
1203 			IDN_DUNLOCK(rmasterid);
1204 		}
1205 		break;
1206 
1207 	case MASTER_SELECT_WAIT:
1208 		sel = "WAIT";
1209 		/*
1210 		 * If the remote domain has the same master as the local
1211 		 * domain then there's no need to wait.
1212 		 */
1213 		if (rmasterid == lmasterid) {
1214 			masterid = lmasterid;
1215 		} else {
1216 			IDN_GUNLOCK();
1217 		}
1218 		IDN_DUNLOCK(idn.localid);
1219 		break;
1220 
1221 	case MASTER_SELECT_ERROR:
1222 		sel = "ERROR";
1223 		/*
1224 		 * Hit impossible condition.
1225 		 */
1226 		cmn_err(CE_WARN,
1227 		    "IDN: 207: local/remote master-id conflict "
1228 		    "(%d.lmasterid = %d, %d.rmasterid = %d)",
1229 		    idn.localid, lmasterid, domid, rmasterid);
1230 		IDN_GUNLOCK();
1231 		IDN_DUNLOCK(idn.localid);
1232 		break;
1233 
1234 	default:
1235 		cmn_err(CE_WARN,
1236 		    "IDN: 208: %s: unknown case (%d)",
1237 		    proc, (int)select);
1238 		IDN_GUNLOCK();
1239 		IDN_DUNLOCK(idn.localid);
1240 		ASSERT(0);
1241 		break;
1242 	}
1243 
1244 	if (masterid == IDN_NIL_DOMID) {
1245 		PR_PROTO("%s:%d: NO MASTER SELECTED (rmstr=%d) sel=%s\n",
1246 		    proc, domid, rmasterid, sel);
1247 	} else {
1248 		PR_PROTO("%s:%d: MASTER SELECTED = %d (%s)\n",
1249 		    proc, domid, masterid,
1250 		    (masterid == idn.localid) ? "LOCAL" :
1251 		    (masterid == domid) ? "REMOTE" : "OTHER");
1252 	}
1253 
1254 	if (do_reconfig) {
1255 		domainset_t	dis_set;
1256 
1257 		/*
1258 		 * Local domain already has a master.
1259 		 * Need to dismantle all connections
1260 		 * and reestablish one with new master.
1261 		 */
1262 		IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
1263 
1264 		PR_PROTO("%s:%d: RECONFIG new masterid = %d\n",
1265 		    proc, domid, domid);
1266 
1267 		IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
1268 		IDN_SET_NEW_MASTERID(domid);
1269 		IDN_GUNLOCK();
1270 
1271 		dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
1272 		DOMAINSET_DEL(dis_set, domid);
1273 
1274 		idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
1275 		    IDNFIN_OPT_RELINK, BOARDSET_ALL);
1276 	}
1277 
1278 	return ((masterid == IDN_NIL_DOMID) ? -1 : 0);
1279 }
1280 
1281 /*ARGSUSED1*/
1282 static void
idn_retry_query(uint_t token,void * arg)1283 idn_retry_query(uint_t token, void *arg)
1284 {
1285 	idn_retry_t	rtype = IDN_RETRY_TOKEN2TYPE(token);
1286 	int		d, domid = IDN_RETRY_TOKEN2DOMID(token);
1287 	idn_domain_t	*dp = &idn_domain[domid];
1288 	idn_synccmd_t	sync_cmd;
1289 	domainset_t	query_set, my_ready_set;
1290 	procname_t	proc = "idn_retry_query";
1291 
1292 	IDN_SYNC_LOCK();
1293 	IDN_DLOCK_EXCL(domid);
1294 
1295 	switch (rtype) {
1296 	case IDNRETRY_CONQ:
1297 		sync_cmd = IDNSYNC_CONNECT;
1298 		my_ready_set = idn.domset.ds_ready_on | idn.domset.ds_connected;
1299 		my_ready_set &= ~idn.domset.ds_trans_off;
1300 		DOMAINSET_ADD(my_ready_set, idn.localid);
1301 		break;
1302 
1303 	case IDNRETRY_FINQ:
1304 		sync_cmd = IDNSYNC_DISCONNECT;
1305 		my_ready_set = idn.domset.ds_ready_off |
1306 		    ~idn.domset.ds_connected;
1307 		break;
1308 
1309 	default:
1310 		IDN_DUNLOCK(domid);
1311 		IDN_SYNC_UNLOCK();
1312 		return;
1313 	}
1314 
1315 	if (dp->dsync.s_cmd == sync_cmd)
1316 		my_ready_set |= dp->dsync.s_set_rdy;
1317 
1318 	query_set = idn_sync_register(domid, sync_cmd, 0, IDNSYNC_REG_QUERY);
1319 
1320 	PR_PROTO("%s:%d: query_set = 0x%x\n", proc, domid, query_set);
1321 
1322 	if (query_set == 0) {
1323 		IDN_DUNLOCK(domid);
1324 		IDN_SYNC_UNLOCK();
1325 		return;
1326 	}
1327 
1328 	for (d = 0; d < MAX_DOMAINS; d++) {
1329 		if (!DOMAIN_IN_SET(query_set, d))
1330 			continue;
1331 
1332 		dp = &idn_domain[d];
1333 		if (d != domid)
1334 			IDN_DLOCK_EXCL(d);
1335 
1336 		if ((dp->dsync.s_cmd == sync_cmd) ||
1337 		    (!dp->dcookie_send &&
1338 		    (rtype == IDNRETRY_CONQ))) {
1339 			if (d != domid)
1340 				IDN_DUNLOCK(d);
1341 			continue;
1342 		}
1343 
1344 		IDN_SYNC_QUERY_UPDATE(domid, d);
1345 
1346 		if (rtype == IDNRETRY_CONQ)
1347 			(void) idn_send_con(d, NULL, IDNCON_QUERY,
1348 			    my_ready_set);
1349 		else
1350 			(void) idn_send_fin(d, NULL, IDNFIN_QUERY,
1351 			    IDNFIN_ARG_NONE, IDNFIN_OPT_NONE, my_ready_set,
1352 			    NIL_FIN_MASTER);
1353 		if (d != domid)
1354 			IDN_DUNLOCK(d);
1355 	}
1356 
1357 	IDN_DUNLOCK(domid);
1358 	IDN_SYNC_UNLOCK();
1359 }
1360 
1361 static int
idn_send_nego(int domid,idn_msgtype_t * mtp,domainset_t conset)1362 idn_send_nego(int domid, idn_msgtype_t *mtp, domainset_t conset)
1363 {
1364 	idn_domain_t	*ldp, *dp;
1365 	int		d, masterid;
1366 	uint_t		dmask;
1367 	uint_t		acknack;
1368 	uint_t		ticket;
1369 	idnneg_dset_t	dset;
1370 	idn_msgtype_t	mt;
1371 	procname_t	proc = "idn_send_nego";
1372 
1373 	ASSERT(IDN_SYNC_IS_LOCKED());
1374 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1375 
1376 	if (mtp) {
1377 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
1378 		mt.mt_mtype = mtp->mt_mtype;
1379 		mt.mt_atype = mtp->mt_atype;
1380 		mt.mt_cookie = mtp->mt_cookie;
1381 	} else {
1382 		acknack = 0;
1383 		mt.mt_mtype = IDNP_NEGO;
1384 		mt.mt_atype = 0;
1385 		mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
1386 	}
1387 
1388 	IDN_GLOCK_SHARED();
1389 
1390 	dp = &idn_domain[domid];
1391 	ldp = &idn_domain[idn.localid];
1392 
1393 	if ((idn.state == IDNGS_RECONFIG) ||
1394 	    ((masterid = IDN_GET_MASTERID()) == IDN_NIL_DOMID)) {
1395 		masterid = IDN_GET_NEW_MASTERID();
1396 		if ((masterid == idn.localid) || (masterid == domid)) {
1397 			/*
1398 			 * We only send the new-master "hint" to
1399 			 * "other" domains.  If the new-master is
1400 			 * ourself or we're talking to the new-master
1401 			 * then we need to be accurate about our
1402 			 * real master so that the correct master
1403 			 * is selected.
1404 			 */
1405 			masterid = IDN_NIL_DOMID;
1406 		}
1407 	}
1408 
1409 	DOMAINSET_DEL(conset, idn.localid);
1410 	DOMAINSET_DEL(conset, domid);
1411 	/*
1412 	 * Exclude domains from conset that are on
1413 	 * remote domain's hitlist.  He's not interested
1414 	 * in hearing about them.  SSP is probably requesting
1415 	 * such domains be unlinked - will eventually get to
1416 	 * local domain.
1417 	 */
1418 	conset &= ~idn.domset.ds_hitlist;
1419 	if ((masterid != IDN_NIL_DOMID) &&
1420 	    DOMAIN_IN_SET(idn.domset.ds_hitlist, masterid)) {
1421 		PR_PROTO("%s:%d: masterid(%d) on hitlist(0x%x) -> -1\n",
1422 		    proc, domid, masterid, idn.domset.ds_hitlist);
1423 		/*
1424 		 * Yikes, our chosen master is on the hitlist!
1425 		 */
1426 		masterid = IDN_NIL_DOMID;
1427 	}
1428 
1429 	dmask = IDNNEG_DSET_MYMASK();
1430 	IDNNEG_DSET_INIT(dset, dmask);
1431 	for (d = 0; d < MAX_DOMAINS; d++) {
1432 		int	cpuid;
1433 
1434 		if (!DOMAIN_IN_SET(conset, d))
1435 			continue;
1436 
1437 		if ((cpuid = idn_domain[d].dcpu) == IDN_NIL_DCPU) {
1438 			ASSERT(d != masterid);
1439 			continue;
1440 		}
1441 
1442 		IDNNEG_DSET_SET(dset, d, cpuid, dmask);
1443 	}
1444 	IDNNEG_DSET_SET_MASTER(dset, domid, masterid);
1445 	ASSERT((masterid != IDN_NIL_DOMID) ?
1446 	    (idn_domain[masterid].dcpu != IDN_NIL_DCPU) : 1);
1447 	IDN_GUNLOCK();
1448 
1449 	IDN_DLOCK_SHARED(idn.localid);
1450 	ticket = IDNVOTE_BASICS(ldp->dvote);
1451 	/*
1452 	 * We just want to send basic vote components without an
1453 	 * indication of mastership (master bit) since that's primarily
1454 	 * for local domain's usage.  There is more correct master
1455 	 * indications in the DSET.  Recall that if we were in a
1456 	 * Reconfig we would have transmitted the "new_masterid"
1457 	 * which might conflict with the local domain's vote.v.master
1458 	 * bit if he was originally the master prior to the Reconfig.
1459 	 */
1460 
1461 	PR_PROTO("%s:%d: sending nego%sto (cpu %d) "
1462 	    "[v=0x%x, cs=0x%x, mstr=%d]\n",
1463 	    proc, domid,
1464 	    (acknack & IDNP_ACK) ? "+ack " :
1465 	    (acknack & IDNP_NACK) ? "+nack " : " ",
1466 	    dp->dcpu, ticket, conset, masterid);
1467 
1468 	IDN_MSGTIMER_START(domid, IDNP_NEGO, 0,
1469 	    idn_msg_waittime[IDNP_NEGO], &mt.mt_cookie);
1470 
1471 	IDNXDC(domid, &mt, ticket, dset[0], dset[1], dset[2]);
1472 
1473 	IDN_DUNLOCK(idn.localid);
1474 
1475 	return (0);
1476 }
1477 
1478 static int
idn_recv_nego(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs,ushort_t dcookie)1479 idn_recv_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs,
1480     ushort_t dcookie)
1481 {
1482 	uint_t		msg = mtp->mt_mtype;
1483 	idn_msgtype_t	mt;
1484 	idn_domain_t	*dp = &idn_domain[domid];
1485 	idn_xdcargs_t	nargs;
1486 	procname_t	proc = "idn_recv_nego";
1487 
1488 	ASSERT(IDN_SYNC_IS_LOCKED());
1489 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1490 
1491 	mt.mt_cookie = mtp->mt_cookie;
1492 
1493 #ifdef DEBUG
1494 	if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
1495 		PR_HITLIST("%s:%d: dcpu=%d, dstate=%s, msg=%x, "
1496 		    "hitlist=%x\n",
1497 		    proc, domid, dp->dcpu, idnds_str[dp->dstate],
1498 		    msg, idn.domset.ds_hitlist);
1499 	}
1500 #endif /* DEBUG */
1501 
1502 	if (dp->dcpu == IDN_NIL_DCPU) {
1503 		int		cpuid;
1504 		uint_t		ticket;
1505 		/*
1506 		 * Brandnew link.  Need to open a new domain entry.
1507 		 */
1508 		ticket = GET_XARGS_NEGO_TICKET(xargs);
1509 		cpuid = dp->dcpu_last;
1510 		ASSERT(VALID_CPUID(cpuid));
1511 
1512 		if (idn_open_domain(domid, cpuid, ticket) != 0) {
1513 			PR_PROTO("%s:%d: FAILED to open doamin "
1514 			    "(ticket = 0x%x)\n",
1515 			    proc, domid, ticket);
1516 			return (-1);
1517 		}
1518 	}
1519 
1520 	if ((msg & IDNP_MSGTYPE_MASK) == IDNP_NEGO) {
1521 		PR_PROTO("%s:%d: assigned SEND cookie 0x%x\n",
1522 		    proc, domid, dcookie);
1523 		dp->dcookie_send = dcookie;
1524 	}
1525 
1526 	if ((dp->dxp == NULL) && IDNDS_IS_CLOSED(dp)) {
1527 		dp->dxp = &xphase_nego;
1528 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
1529 	} else if (dp->dxp != &xphase_nego) {
1530 		if (msg & IDNP_MSGTYPE_MASK) {
1531 			/*
1532 			 * If we already have a connection to somebody
1533 			 * trying to initiate a connection to us, then
1534 			 * possibly we've awaken from a coma or he did.
1535 			 * In any case, dismantle current connection
1536 			 * and attempt to establish a new one.
1537 			 */
1538 			if (dp->dstate == IDNDS_CONNECTED) {
1539 				DOMAINSET_ADD(idn.domset.ds_relink, domid);
1540 				IDN_HISTORY_LOG(IDNH_RELINK, domid,
1541 				    dp->dstate, idn.domset.ds_relink);
1542 				(void) idn_disconnect(domid, IDNFIN_NORMAL,
1543 				    IDNFIN_ARG_NONE, IDNFIN_SYNC_YES);
1544 			} else {
1545 				mt.mt_mtype = IDNP_NACK;
1546 				mt.mt_atype = msg;
1547 
1548 				CLR_XARGS(nargs);
1549 
1550 				if (DOMAIN_IN_SET(idn.domset.ds_hitlist,
1551 				    domid)) {
1552 					SET_XARGS_NACK_TYPE(nargs,
1553 					    IDNNACK_EXIT);
1554 				} else {
1555 					int	new_masterid;
1556 					int	new_cpuid = IDN_NIL_DCPU;
1557 
1558 					SET_XARGS_NACK_TYPE(nargs,
1559 					    IDNNACK_RETRY);
1560 					IDN_GLOCK_SHARED();
1561 					new_masterid = IDN_GET_NEW_MASTERID();
1562 					if (new_masterid == IDN_NIL_DOMID)
1563 						new_masterid =
1564 						    IDN_GET_MASTERID();
1565 					if (new_masterid != IDN_NIL_DOMID) {
1566 						idn_domain_t	*mdp;
1567 
1568 						mdp = &idn_domain[new_masterid];
1569 						new_cpuid = mdp->dcpu;
1570 					}
1571 					SET_XARGS_NACK_ARG1(nargs,
1572 					    new_masterid);
1573 					SET_XARGS_NACK_ARG2(nargs, new_cpuid);
1574 					IDN_GUNLOCK();
1575 				}
1576 				idn_send_acknack(domid, &mt, nargs);
1577 			}
1578 		}
1579 		return (0);
1580 	}
1581 
1582 	(void) idn_xphase_transition(domid, mtp, xargs);
1583 
1584 	return (0);
1585 }
1586 
1587 /*ARGSUSED1*/
1588 static void
idn_retry_nego(uint_t token,void * arg)1589 idn_retry_nego(uint_t token, void *arg)
1590 {
1591 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
1592 	int		new_masterid;
1593 	idn_domain_t	*dp = &idn_domain[domid];
1594 	idn_xdcargs_t	xargs;
1595 	procname_t	proc = "idn_retry_nego";
1596 
1597 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_NEGO);
1598 
1599 	IDN_SYNC_LOCK();
1600 	IDN_DLOCK_EXCL(domid);
1601 
1602 	if (dp->dxp != &xphase_nego) {
1603 		STRING(str);
1604 
1605 #ifdef DEBUG
1606 		if (dp->dxp) {
1607 			INUM2STR(dp->dxp->xt_msgtype, str);
1608 		}
1609 #endif /* DEBUG */
1610 
1611 		PR_PROTO("%s:%d: dxp(%s) != NEGO...bailing...\n",
1612 		    proc, domid, dp->dxp ? str : "NULL");
1613 		IDN_DUNLOCK(domid);
1614 		IDN_SYNC_UNLOCK();
1615 		return;
1616 	}
1617 
1618 	if (dp->dxstate != IDNXS_PEND) {
1619 		PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
1620 		    proc, domid, idnxs_str[dp->dxstate],
1621 		    idnxs_str[IDNXS_PEND]);
1622 		IDN_DUNLOCK(domid);
1623 		IDN_SYNC_UNLOCK();
1624 		return;
1625 	}
1626 
1627 	IDN_GLOCK_SHARED();
1628 	if (idn.state == IDNGS_RECONFIG) {
1629 		/*
1630 		 * Have to try again later after
1631 		 * reconfig has completed.
1632 		 */
1633 		PR_PROTO("%s:%d: reconfig in-progress...try later\n",
1634 		    proc, domid);
1635 		idn_retry_submit(idn_retry_nego, NULL, token,
1636 		    idn_msg_retrytime[IDNP_NEGO]);
1637 		IDN_GUNLOCK();
1638 		IDN_DUNLOCK(domid);
1639 		IDN_SYNC_UNLOCK();
1640 		return;
1641 	}
1642 	new_masterid = IDN_GET_NEW_MASTERID();
1643 	if ((idn.state == IDNGS_CONNECT) &&
1644 	    (new_masterid != IDN_NIL_DOMID) &&
1645 	    (domid != new_masterid) &&
1646 	    (idn.localid != new_masterid)) {
1647 		/*
1648 		 * We have a new master pending and this
1649 		 * guy isn't it.  Wait until the local domain
1650 		 * has a chance to connect with the new
1651 		 * master before going forward with this
1652 		 * guy.
1653 		 */
1654 		PR_PROTO("%s:%d: waiting for connect to new master %d\n",
1655 		    proc, domid, IDN_GET_NEW_MASTERID());
1656 		idn_retry_submit(idn_retry_nego, NULL, token,
1657 		    idn_msg_retrytime[IDNP_NEGO]);
1658 		IDN_GUNLOCK();
1659 		IDN_DUNLOCK(domid);
1660 		IDN_SYNC_UNLOCK();
1661 		return;
1662 	}
1663 	IDN_GUNLOCK();
1664 
1665 	(void) idn_xphase_transition(domid, NULL, xargs);
1666 
1667 	IDN_DUNLOCK(domid);
1668 	IDN_SYNC_UNLOCK();
1669 }
1670 
1671 static int
idn_check_nego(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)1672 idn_check_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
1673 {
1674 	int		d, new_masterid, masterid;
1675 	int		cpuid, m_cpuid = -1;
1676 	uint_t		dmask;
1677 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
1678 	idn_domain_t	*dp, *ldp;
1679 	domainset_t	con_set, pending_set;
1680 	idnneg_dset_t	dset;
1681 	procname_t	proc = "idn_check_nego";
1682 
1683 	ASSERT(IDN_SYNC_IS_LOCKED());
1684 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
1685 
1686 	dp = &idn_domain[domid];
1687 	ldp = &idn_domain[idn.localid];
1688 
1689 	if (msg & IDNP_NACK) {
1690 		if (GET_XARGS_NACK_TYPE(xargs) == IDNNACK_EXIT) {
1691 			PR_HITLIST("%s:%d(%s): (msg=%x) EXIT received, "
1692 			    "adding to hitlist %x -> %x\n",
1693 			    proc, domid, idnds_str[dp->dstate], msg,
1694 			    idn.domset.ds_hitlist,
1695 			    idn.domset.ds_hitlist | DOMAINSET(domid));
1696 
1697 			DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
1698 			return (-1);
1699 		} else {
1700 			return (0);
1701 		}
1702 	}
1703 
1704 	if (DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
1705 		PR_HITLIST("%s:%d(%s): (msg=%x) domain in hitlist (%x) - "
1706 		    "exiting phase\n",
1707 		    proc, domid, idnds_str[dp->dstate], msg,
1708 		    idn.domset.ds_hitlist);
1709 		return (-1);
1710 	}
1711 
1712 	if ((dp->dstate == IDNDS_NEGO_PEND) && (msg & IDNP_MSGTYPE_MASK) &&
1713 	    (msg & IDNP_ACK))		/* nego+ack */
1714 		return (1);
1715 
1716 	dmask = (uint_t)-1;
1717 
1718 	IDN_GLOCK_EXCL();
1719 	if (idn.state == IDNGS_DISCONNECT) {
1720 		PR_PROTO("%s:%d: DISCONNECT in-progress >>> EXIT\n",
1721 		    proc, domid);
1722 		IDN_GUNLOCK();
1723 		return (-1);
1724 	} else if (idn.state == IDNGS_OFFLINE) {
1725 		IDN_GSTATE_TRANSITION(IDNGS_CONNECT);
1726 		IDN_PREP_HWINIT();
1727 		IDN_DLOCK_EXCL(idn.localid);
1728 		ldp->dvote.v.connected = 0;
1729 		IDN_DUNLOCK(idn.localid);
1730 	}
1731 
1732 	if (!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid)) {
1733 		DOMAINSET_ADD(idn.domset.ds_trans_on, domid);
1734 		IDN_HISTORY_LOG(IDNH_NEGO, domid,
1735 		    idn.domset.ds_trans_on,
1736 		    idn.domset.ds_connected);
1737 	}
1738 
1739 	switch (idn.state) {
1740 	case IDNGS_RECONFIG:
1741 		PR_PROTO("%s:%d: RECONFIG in-progress >>> RETRY\n",
1742 		    proc, domid);
1743 		IDN_GUNLOCK();
1744 		return (1);
1745 
1746 	case IDNGS_CONNECT:
1747 		new_masterid = IDN_GET_NEW_MASTERID();
1748 		if ((new_masterid != IDN_NIL_DOMID) &&
1749 		    (domid != new_masterid) &&
1750 		    (idn.localid != new_masterid)) {
1751 			PR_PROTO("%s:%d: waiting for connect to "
1752 			    "new master %d\n",
1753 			    proc, domid, IDN_GET_NEW_MASTERID());
1754 			IDN_GUNLOCK();
1755 			return (1);
1756 		}
1757 		break;
1758 
1759 	default:
1760 		break;
1761 	}
1762 
1763 	ASSERT((idn.state == IDNGS_CONNECT) || (idn.state == IDNGS_ONLINE));
1764 
1765 	con_set = 0;
1766 
1767 	if (msg) {
1768 		idn_domain_t	*mdp;
1769 		idn_vote_t	vote;
1770 
1771 		vote.ticket = GET_XARGS_NEGO_TICKET(xargs);
1772 		/*
1773 		 * Sender should note have set master bit,
1774 		 * but just in case clear it so local domain
1775 		 * doesn't get confused.
1776 		 */
1777 		vote.v.master = 0;
1778 		dp->dvote.ticket = vote.ticket;
1779 		GET_XARGS_NEGO_DSET(xargs, dset);
1780 		/*LINTED*/
1781 		IDNNEG_DSET_GET_MASK(dset, domid, dmask);
1782 		IDNNEG_DSET_GET_MASTER(dset, new_masterid);
1783 		if (new_masterid == IDNNEG_NO_MASTER) {
1784 			new_masterid = IDN_NIL_DOMID;
1785 		} else {
1786 			/*
1787 			 * Remote domain has a master.  Find
1788 			 * his cpuid in the dset.  We may need
1789 			 * it to initiate a connection.
1790 			 */
1791 			if (new_masterid == domid) {
1792 				m_cpuid = dp->dcpu;
1793 			} else {
1794 				IDNNEG_DSET_GET(dset, new_masterid, m_cpuid,
1795 				    dmask);
1796 				if (m_cpuid == -1) {
1797 					/*
1798 					 * Something is bogus if remote domain
1799 					 * is reporting a valid masterid, but
1800 					 * doesn't have the cpuid for it.
1801 					 */
1802 					cmn_err(CE_WARN,
1803 					    "IDN: 209: remote domain (ID "
1804 					    "%d, CPU %d) reporting master "
1805 					    "(ID %d) without CPU ID",
1806 					    domid, dp->dcpu, new_masterid);
1807 					DOMAINSET_ADD(idn.domset.ds_hitlist,
1808 					    domid);
1809 					IDN_GUNLOCK();
1810 					return (-1);
1811 				}
1812 			}
1813 		}
1814 
1815 		for (d = 0; d < MAX_DOMAINS; d++) {
1816 			if ((d == idn.localid) || (d == domid))
1817 				continue;
1818 			IDNNEG_DSET_GET(dset, d, cpuid, dmask);
1819 			if (cpuid != -1) {
1820 				DOMAINSET_ADD(con_set, d);
1821 			}
1822 		}
1823 
1824 #ifdef DEBUG
1825 		if (idn.domset.ds_hitlist) {
1826 			PR_HITLIST("%s:%d: con_set %x -> %x (hitlist = %x)\n",
1827 			    proc, domid, con_set,
1828 			    con_set & ~idn.domset.ds_hitlist,
1829 			    idn.domset.ds_hitlist);
1830 		}
1831 #endif /* DEBUG */
1832 
1833 		con_set &= ~idn.domset.ds_hitlist;
1834 
1835 		ASSERT(!DOMAIN_IN_SET(con_set, idn.localid));
1836 		ASSERT(!DOMAIN_IN_SET(con_set, domid));
1837 
1838 		if ((new_masterid != IDN_NIL_DOMID) &&
1839 		    DOMAIN_IN_SET(idn.domset.ds_hitlist, new_masterid)) {
1840 			PR_HITLIST("%s:%d: new_mstr %d -> -1 (hitlist = %x)\n",
1841 			    proc, domid, new_masterid,
1842 			    idn.domset.ds_hitlist);
1843 			IDN_GUNLOCK();
1844 			return (1);
1845 		}
1846 
1847 		if (idn_select_master(domid, new_masterid, m_cpuid) < 0) {
1848 			/*
1849 			 * Returns w/GLOCK dropped if error.
1850 			 */
1851 			return (1);
1852 		}
1853 
1854 		masterid = IDN_GET_MASTERID();
1855 		ASSERT(masterid != IDN_NIL_DOMID);
1856 
1857 		if (idn.state == IDNGS_CONNECT) {
1858 			/*
1859 			 * This is the initial connection for
1860 			 * the local domain.
1861 			 */
1862 			IDN_DLOCK_EXCL(idn.localid);
1863 
1864 			if (masterid == idn.localid) {
1865 				if (idn_master_init() < 0) {
1866 					cmn_err(CE_WARN,
1867 					    "IDN: 210: failed to init "
1868 					    "MASTER context");
1869 					ldp->dvote.v.master = 0;
1870 					IDN_DUNLOCK(idn.localid);
1871 					IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
1872 					IDN_SET_MASTERID(IDN_NIL_DOMID);
1873 					IDN_GUNLOCK();
1874 					return (-1);
1875 				}
1876 				DSLAB_LOCK_EXCL(idn.localid);
1877 				ldp->dslab_state = DSLAB_STATE_LOCAL;
1878 				DSLAB_UNLOCK(idn.localid);
1879 				ldp->dvote.v.connected = 1;
1880 			} else {
1881 				/*
1882 				 * Either the remote domain is the
1883 				 * master or its a new slave trying
1884 				 * to connect to us.  We can't allow
1885 				 * further progress until we've
1886 				 * sync'd up with the master.
1887 				 */
1888 				if (masterid != domid) {
1889 					IDN_DUNLOCK(idn.localid);
1890 					IDN_GUNLOCK();
1891 					return (1);
1892 				}
1893 				DSLAB_LOCK_EXCL(idn.localid);
1894 				ldp->dslab_state = DSLAB_STATE_REMOTE;
1895 				DSLAB_UNLOCK(idn.localid);
1896 			}
1897 			IDN_DUNLOCK(idn.localid);
1898 			/*
1899 			 * We've sync'd up with the new master.
1900 			 */
1901 			IDN_GSTATE_TRANSITION(IDNGS_ONLINE);
1902 		}
1903 
1904 		mdp = &idn_domain[masterid];
1905 
1906 		if ((masterid != domid) && !IDNDS_CONFIG_DONE(mdp)) {
1907 			/*
1908 			 * We can't progress any further with
1909 			 * other domains until we've exchanged all
1910 			 * the necessary CFG info with the master,
1911 			 * i.e. until we have a mailbox area from
1912 			 * which we can allocate mailboxes to
1913 			 * other domains.
1914 			 */
1915 			PR_PROTO("%s:%d: still exchanging CFG "
1916 			    "w/master(%d)\n", proc, domid, masterid);
1917 			IDN_GUNLOCK();
1918 			return (1);
1919 		}
1920 
1921 		DSLAB_LOCK_EXCL(domid);
1922 		dp->dslab_state = ldp->dslab_state;
1923 		DSLAB_UNLOCK(domid);
1924 		if (idn.state != IDNGS_ONLINE) {
1925 			IDN_GSTATE_TRANSITION(IDNGS_ONLINE);
1926 		}
1927 	}
1928 
1929 	IDN_GUNLOCK();
1930 
1931 	pending_set = con_set;
1932 	pending_set &= ~(idn.domset.ds_trans_on | idn.domset.ds_connected);
1933 	idn.domset.ds_trans_on |= pending_set;
1934 
1935 	con_set |= idn.domset.ds_trans_on | idn.domset.ds_connected;
1936 	con_set &= ~idn.domset.ds_trans_off;
1937 	DOMAINSET_ADD(con_set, idn.localid);
1938 
1939 	if (dp->dsync.s_cmd != IDNSYNC_CONNECT) {
1940 		idn_sync_exit(domid, IDNSYNC_DISCONNECT);
1941 		idn_sync_enter(domid, IDNSYNC_CONNECT,
1942 		    con_set, DOMAINSET(idn.localid), idn_xstate_transfunc,
1943 		    (void *)IDNP_CON);
1944 	}
1945 
1946 	/*
1947 	 * Get this domain registered as an expected domain on
1948 	 * the remaining domains in the CONNECT synchronization.
1949 	 */
1950 	(void) idn_sync_register(domid, IDNSYNC_CONNECT, 0, IDNSYNC_REG_NEW);
1951 
1952 	/*
1953 	 * Note that if (msg == 0), i.e. then there will be
1954 	 * no dset and also pending_set will be 0.
1955 	 * So, the following loop will never attempt to
1956 	 * look at the dset unless (msg != 0), implying
1957 	 * that we've been through the initial code above
1958 	 * and have initialized dmask.
1959 	 */
1960 	ASSERT(pending_set ? (dmask != (uint_t)-1) : 1);
1961 
1962 	for (d = 0; d < MAX_DOMAINS; d++) {
1963 		int	rv;
1964 
1965 		if (!DOMAIN_IN_SET(pending_set, d))
1966 			continue;
1967 
1968 		ASSERT((d != idn.localid) && (d != domid));
1969 
1970 		dp = &idn_domain[d];
1971 
1972 		IDNNEG_DSET_GET(dset, d, cpuid, dmask);
1973 		if (cpuid == -1) {
1974 			PR_PROTO("%s:%d: failed to get cpuid from dset "
1975 			    "for domain %d (pset = 0x%x)\n",
1976 			    proc, domid, d, pending_set);
1977 			DOMAINSET_DEL(idn.domset.ds_trans_on, d);
1978 			continue;
1979 		}
1980 
1981 		IDN_DLOCK_EXCL(d);
1982 		if ((rv = idn_open_domain(d, cpuid, 0)) != 0) {
1983 			PR_PROTO("%s:%d: failed "
1984 			    "idn_open_domain(%d,%d,0) (rv = %d)\n",
1985 			    proc, domid, d, cpuid, rv);
1986 			if (rv < 0) {
1987 				cmn_err(CE_WARN,
1988 				    "IDN: 205: (%s) failed to "
1989 				    "open-domain(%d,%d)",
1990 				    proc, d, cpuid);
1991 				DOMAINSET_DEL(idn.domset.ds_trans_on, d);
1992 			} else if (DOMAIN_IN_SET(idn.domset.ds_trans_off, d)) {
1993 				/*
1994 				 * We've requested to connect to a domain
1995 				 * from which we're disconnecting.  We
1996 				 * better mark this guy for relinking.
1997 				 */
1998 				DOMAINSET_ADD(idn.domset.ds_relink, d);
1999 				IDN_HISTORY_LOG(IDNH_RELINK, d, dp->dstate,
2000 				    idn.domset.ds_relink);
2001 			}
2002 			IDN_DUNLOCK(d);
2003 			continue;
2004 		}
2005 
2006 		(void) idn_connect(d);
2007 
2008 		IDN_DUNLOCK(d);
2009 	}
2010 
2011 	return (0);
2012 }
2013 
2014 /*ARGSUSED*/
2015 static void
idn_action_nego_pend(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2016 idn_action_nego_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2017 {
2018 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2019 	idn_msgtype_t	mt;
2020 	domainset_t	con_set;
2021 
2022 	ASSERT(IDN_SYNC_IS_LOCKED());
2023 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2024 
2025 	con_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
2026 	con_set &= ~idn.domset.ds_trans_off;
2027 
2028 	if (!msg) {
2029 		(void) idn_send_nego(domid, NULL, con_set);
2030 	} else {
2031 		mt.mt_mtype = IDNP_NEGO | IDNP_ACK;
2032 		mt.mt_atype = 0;
2033 		mt.mt_cookie = mtp->mt_cookie;
2034 		(void) idn_send_nego(domid, &mt, con_set);
2035 	}
2036 }
2037 
2038 /*ARGSUSED*/
2039 static void
idn_error_nego(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2040 idn_error_nego(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2041 {
2042 	int	new_masterid, new_cpuid;
2043 	int	retry = 1;
2044 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2045 	uint_t	token;
2046 
2047 	ASSERT(IDN_SYNC_IS_LOCKED());
2048 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2049 
2050 	if (msg & IDNP_NACK) {
2051 		idn_nack_t	nack;
2052 
2053 		nack = GET_XARGS_NACK_TYPE(xargs);
2054 		switch (nack) {
2055 		case IDNNACK_RETRY:
2056 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2057 			new_cpuid    = (int)GET_XARGS_NACK_ARG2(xargs);
2058 			break;
2059 
2060 		case IDNNACK_EXIT:
2061 			retry = 0;
2062 			/*FALLTHROUGH*/
2063 
2064 		default:
2065 			new_masterid = IDN_NIL_DOMID;
2066 			new_cpuid    = IDN_NIL_DCPU;
2067 			break;
2068 		}
2069 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2070 	}
2071 
2072 	if (msg & IDNP_MSGTYPE_MASK) {
2073 		idn_msgtype_t	mt;
2074 		idn_xdcargs_t	nargs;
2075 
2076 		mt.mt_mtype = IDNP_NACK;
2077 		mt.mt_atype = msg;
2078 		mt.mt_cookie = mtp->mt_cookie;
2079 		CLR_XARGS(nargs);
2080 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
2081 		IDN_GLOCK_SHARED();
2082 		new_masterid = IDN_GET_NEW_MASTERID();
2083 		if (new_masterid == IDN_NIL_DOMID)
2084 			new_masterid = IDN_GET_MASTERID();
2085 		if (new_masterid != IDN_NIL_DOMID)
2086 			new_cpuid = idn_domain[new_masterid].dcpu;
2087 		else
2088 			new_cpuid = IDN_NIL_DCPU;
2089 		SET_XARGS_NACK_ARG1(nargs, new_masterid);
2090 		SET_XARGS_NACK_ARG2(nargs, new_cpuid);
2091 		IDN_GUNLOCK();
2092 		idn_send_acknack(domid, &mt, nargs);
2093 	}
2094 
2095 	if (retry) {
2096 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2097 		idn_retry_submit(idn_retry_nego, NULL, token,
2098 		    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2099 	} else {
2100 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2101 		IDN_RESET_COOKIES(domid);
2102 		(void) idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
2103 		    IDNDS_SYNC_TYPE(&idn_domain[domid]));
2104 	}
2105 }
2106 
2107 /*ARGSUSED*/
2108 static void
idn_action_nego_sent(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2109 idn_action_nego_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2110 {
2111 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2112 	domainset_t	conset;
2113 	idn_msgtype_t	mt;
2114 
2115 	ASSERT(IDN_SYNC_IS_LOCKED());
2116 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2117 
2118 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2119 
2120 	conset = idn.domset.ds_trans_on | idn.domset.ds_connected;
2121 	conset &= ~idn.domset.ds_trans_off;
2122 
2123 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
2124 		/*
2125 		 * nego
2126 		 */
2127 		mt.mt_mtype = IDNP_NEGO | IDNP_ACK;
2128 		mt.mt_atype = 0;
2129 		(void) idn_send_nego(domid, &mt, conset);
2130 	} else if (msg & IDNP_MSGTYPE_MASK) {
2131 		int		d;
2132 		idn_xdcargs_t	nargs;
2133 		idnneg_dset_t	dset;
2134 		uint_t		dmask;
2135 		idn_vote_t	vote;
2136 
2137 		mt.mt_mtype = IDNP_ACK;
2138 		mt.mt_atype = msg;
2139 		DOMAINSET_DEL(conset, idn.localid);
2140 		DOMAINSET_DEL(conset, domid);
2141 
2142 		dmask = IDNNEG_DSET_MYMASK();
2143 		IDNNEG_DSET_INIT(dset, dmask);
2144 		for (d = 0; d < MAX_DOMAINS; d++) {
2145 			int	cpuid;
2146 
2147 			if (!DOMAIN_IN_SET(conset, d))
2148 				continue;
2149 
2150 			if ((cpuid = idn_domain[d].dcpu) == IDN_NIL_DCPU)
2151 				continue;
2152 
2153 			IDNNEG_DSET_SET(dset, d, cpuid, dmask);
2154 		}
2155 		IDNNEG_DSET_SET_MASTER(dset, domid, IDN_GET_MASTERID());
2156 		ASSERT((IDN_GET_MASTERID() != IDN_NIL_DOMID) ?
2157 		    (idn_domain[IDN_GET_MASTERID()].dcpu != IDN_NIL_DCPU) : 1);
2158 		vote.ticket = idn_domain[idn.localid].dvote.ticket;
2159 		vote.v.master = 0;
2160 		CLR_XARGS(nargs);
2161 		SET_XARGS_NEGO_TICKET(nargs, vote.ticket);
2162 		SET_XARGS_NEGO_DSET(nargs, dset);
2163 		/*
2164 		 * nego+ack
2165 		 */
2166 		idn_send_acknack(domid, &mt, nargs);
2167 	} else {
2168 		uint_t		token;
2169 		int		new_masterid, new_cpuid;
2170 		int		retry = 1;
2171 		idn_nack_t	nack;
2172 		/*
2173 		 * nack - retry
2174 		 *
2175 		 * It's possible if we've made it this far that
2176 		 * we may have already chosen a master and this
2177 		 * dude might be it!  If it is we need to clean up.
2178 		 */
2179 		nack = GET_XARGS_NACK_TYPE(xargs);
2180 		switch (nack) {
2181 		case IDNNACK_RETRY:
2182 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2183 			new_cpuid = (int)GET_XARGS_NACK_ARG2(xargs);
2184 			break;
2185 
2186 		case IDNNACK_EXIT:
2187 			retry = 0;
2188 			/*FALLTHROUGH*/
2189 
2190 		default:
2191 			new_masterid = IDN_NIL_DOMID;
2192 			new_cpuid = IDN_NIL_DCPU;
2193 			break;
2194 		}
2195 
2196 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2197 
2198 		if (retry) {
2199 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2200 			idn_retry_submit(idn_retry_nego, NULL, token,
2201 			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2202 		} else {
2203 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
2204 			IDN_RESET_COOKIES(domid);
2205 			(void) idn_disconnect(domid, IDNFIN_NORMAL,
2206 			    IDNFIN_ARG_NONE,
2207 			    IDNDS_SYNC_TYPE(&idn_domain[domid]));
2208 		}
2209 	}
2210 }
2211 
2212 /*ARGSUSED*/
2213 static void
idn_action_nego_rcvd(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2214 idn_action_nego_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2215 {
2216 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2217 
2218 	ASSERT(IDN_SYNC_IS_LOCKED());
2219 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2220 
2221 	if (msg & IDNP_NACK) {
2222 		uint_t		token;
2223 		int		new_masterid, new_cpuid;
2224 		int		retry = 1;
2225 		idn_nack_t	nack;
2226 		/*
2227 		 * nack - retry.
2228 		 *
2229 		 * At this stage of receiving a nack we need to
2230 		 * check whether we need to start over again with
2231 		 * selecting a new master.
2232 		 */
2233 		nack = GET_XARGS_NACK_TYPE(xargs);
2234 		switch (nack) {
2235 		case IDNNACK_RETRY:
2236 			new_masterid = (int)GET_XARGS_NACK_ARG1(xargs);
2237 			new_cpuid = (int)GET_XARGS_NACK_ARG2(xargs);
2238 			break;
2239 
2240 		case IDNNACK_EXIT:
2241 			retry = 0;
2242 			/*FALLTHROUGH*/
2243 
2244 		default:
2245 			new_masterid = IDN_NIL_DOMID;
2246 			new_cpuid = IDN_NIL_DCPU;
2247 			break;
2248 		}
2249 
2250 		idn_nego_cleanup_check(domid, new_masterid, new_cpuid);
2251 
2252 		if (retry) {
2253 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
2254 			idn_retry_submit(idn_retry_nego, NULL, token,
2255 			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2256 		} else {
2257 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
2258 			IDN_RESET_COOKIES(domid);
2259 			(void) idn_disconnect(domid, IDNFIN_NORMAL,
2260 			    IDNFIN_ARG_NONE,
2261 			    IDNDS_SYNC_TYPE(&idn_domain[domid]));
2262 		}
2263 	}
2264 }
2265 
2266 static void
idn_final_nego(int domid)2267 idn_final_nego(int domid)
2268 {
2269 	idn_domain_t	*dp = &idn_domain[domid];
2270 
2271 	ASSERT(IDN_SYNC_IS_LOCKED());
2272 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2273 
2274 	(void) idn_retry_terminate(IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO));
2275 
2276 	ASSERT(dp->dstate == IDNDS_CONFIG);
2277 
2278 	dp->dxp = NULL;
2279 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
2280 
2281 	idn_send_config(domid, 1);
2282 }
2283 
2284 /*
2285  */
2286 /*ARGSUSED1*/
2287 static void
idn_exit_nego(int domid,uint_t msgtype)2288 idn_exit_nego(int domid, uint_t msgtype)
2289 {
2290 	idn_domain_t	*dp;
2291 	idn_fin_t	fintype;
2292 
2293 	ASSERT(IDN_SYNC_IS_LOCKED());
2294 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2295 
2296 	dp = &idn_domain[domid];
2297 
2298 	fintype = msgtype ? IDNFIN_NORMAL : IDNFIN_FORCE_HARD;
2299 
2300 	(void) idn_retry_terminate(IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO));
2301 
2302 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_connected, domid));
2303 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_ready_on, domid));
2304 	ASSERT(dp->dxp == &xphase_nego);
2305 
2306 	idn_nego_cleanup_check(domid, IDN_NIL_DOMID, IDN_NIL_DCPU);
2307 
2308 	IDN_GLOCK_SHARED();
2309 	if ((idn.state != IDNGS_DISCONNECT) &&
2310 	    !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
2311 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
2312 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
2313 		    idn.domset.ds_relink);
2314 	} else {
2315 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), NULL);
2316 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2317 	}
2318 	IDN_GUNLOCK();
2319 	/*
2320 	 * Reset send cookie to 0 so that receiver does not validate
2321 	 * cookie.  This is necessary since at this early stage it's
2322 	 * possible we may not have exchanged appropriate cookies.
2323 	 */
2324 	IDN_RESET_COOKIES(domid);
2325 	(void) idn_disconnect(domid, fintype, IDNFIN_ARG_NONE,
2326 	    IDNDS_SYNC_TYPE(dp));
2327 }
2328 
2329 static void
idn_nego_cleanup_check(int domid,int new_masterid,int new_cpuid)2330 idn_nego_cleanup_check(int domid, int new_masterid, int new_cpuid)
2331 {
2332 	idn_domain_t	*ldp, *dp;
2333 	procname_t	proc = "idn_nego_cleanup_check";
2334 
2335 	ASSERT(IDN_SYNC_IS_LOCKED());
2336 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2337 
2338 	dp = &idn_domain[domid];
2339 	ldp = &idn_domain[idn.localid];
2340 
2341 	IDN_GLOCK_EXCL();
2342 
2343 	if (((idn.state == IDNGS_ONLINE) && !idn.domset.ds_connected) ||
2344 	    (idn.state == IDNGS_CONNECT)) {
2345 		domainset_t	trans_on;
2346 		int		masterid;
2347 		int		retry_domid = IDN_NIL_DOMID;
2348 		int		rv;
2349 
2350 		IDN_DLOCK_EXCL(idn.localid);
2351 		masterid = (idn.state == IDNGS_ONLINE) ?
2352 		    IDN_GET_MASTERID() : IDN_GET_NEW_MASTERID();
2353 		trans_on = idn.domset.ds_trans_on;
2354 		DOMAINSET_DEL(trans_on, domid);
2355 		if (trans_on == 0) {
2356 			int		d;
2357 			domainset_t	relink = idn.domset.ds_relink;
2358 			/*
2359 			 * This was the only guy we were trying
2360 			 * to connect with.
2361 			 */
2362 			ASSERT((idn.state == IDNGS_ONLINE) ?
2363 			    ((idn.localid == masterid) ||
2364 			    (domid == masterid)) : 1);
2365 			if (idn.localid == masterid)
2366 				idn_master_deinit();
2367 			ldp->dvote.v.connected = 0;
2368 			ldp->dvote.v.master = 0;
2369 			dp->dvote.v.master = 0;
2370 			IDN_SET_MASTERID(IDN_NIL_DOMID);
2371 			IDN_SET_NEW_MASTERID(new_masterid);
2372 			IDN_GSTATE_TRANSITION(IDNGS_CONNECT);
2373 			IDN_PREP_HWINIT();
2374 			IDN_DUNLOCK(idn.localid);
2375 			IDN_GUNLOCK();
2376 			/*
2377 			 * If there's a new master available then
2378 			 * just try and relink with him unless
2379 			 * it's ourself.
2380 			 */
2381 			if ((new_masterid != IDN_NIL_DOMID) &&
2382 			    (new_masterid != idn.localid) &&
2383 			    (new_masterid != domid)) {
2384 				IDN_DLOCK_EXCL(new_masterid);
2385 				rv = idn_open_domain(new_masterid,
2386 				    new_cpuid, 0);
2387 				if (rv < 0) {
2388 					cmn_err(CE_WARN,
2389 					    "IDN: 205: (%s) failed to "
2390 					    "open-domain(%d,%d)",
2391 					    proc, new_masterid, new_cpuid);
2392 					IDN_GLOCK_EXCL();
2393 					IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
2394 					IDN_GUNLOCK();
2395 				} else {
2396 					relink = DOMAINSET(new_masterid);
2397 				}
2398 				IDN_DUNLOCK(new_masterid);
2399 			}
2400 			DOMAINSET_DEL(relink, domid);
2401 			if (relink)
2402 				for (d = 0; d < MAX_DOMAINS; d++) {
2403 					if (!DOMAIN_IN_SET(relink, d))
2404 						continue;
2405 					retry_domid = d;
2406 					break;
2407 				}
2408 		} else if (domid == masterid) {
2409 			/*
2410 			 * There are other domains we were trying
2411 			 * to connect to.  As long as the chosen
2412 			 * master was somebody other then this
2413 			 * domain that nack'd us, life is cool, but
2414 			 * if it was this remote domain we'll need
2415 			 * to start over.
2416 			 */
2417 			IDN_DUNLOCK(idn.localid);
2418 			dp->dvote.v.master = 0;
2419 			IDN_SET_MASTERID(IDN_NIL_DOMID);
2420 			IDN_SET_NEW_MASTERID(new_masterid);
2421 
2422 			if (idn.state == IDNGS_ONLINE) {
2423 				IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
2424 				    gk_reconfig_last);
2425 				IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
2426 				IDN_GUNLOCK();
2427 				idn_unlink_domainset(trans_on, IDNFIN_NORMAL,
2428 				    IDNFIN_ARG_NONE,
2429 				    IDNFIN_OPT_RELINK,
2430 				    BOARDSET_ALL);
2431 			} else if ((new_masterid != IDN_NIL_DOMID) &&
2432 			    (new_masterid != idn.localid) &&
2433 			    (new_masterid != domid) &&
2434 			    !DOMAIN_IN_SET(trans_on, new_masterid)) {
2435 				IDN_GUNLOCK();
2436 				IDN_DLOCK_EXCL(new_masterid);
2437 				rv = idn_open_domain(new_masterid,
2438 				    new_cpuid, 0);
2439 				IDN_GLOCK_EXCL();
2440 				IDN_DUNLOCK(new_masterid);
2441 				if (rv < 0) {
2442 					cmn_err(CE_WARN,
2443 					    "IDN: 205: (%s) failed to "
2444 					    "open-domain(%d,%d)",
2445 					    proc, new_masterid,
2446 					    new_cpuid);
2447 					IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
2448 					new_masterid = IDN_NIL_DOMID;
2449 				} else {
2450 					retry_domid = new_masterid;
2451 				}
2452 				IDN_GUNLOCK();
2453 			} else {
2454 				IDN_GUNLOCK();
2455 			}
2456 		} else {
2457 			IDN_DUNLOCK(idn.localid);
2458 			IDN_GUNLOCK();
2459 		}
2460 		if (retry_domid != IDN_NIL_DOMID) {
2461 			uint_t		token;
2462 			idn_domain_t	*rdp = &idn_domain[retry_domid];
2463 
2464 			IDN_DLOCK_EXCL(retry_domid);
2465 			rdp->dxp = &xphase_nego;
2466 			IDN_XSTATE_TRANSITION(rdp, IDNXS_PEND);
2467 			IDN_DUNLOCK(retry_domid);
2468 			token = IDN_RETRY_TOKEN(retry_domid, IDNRETRY_NEGO);
2469 			idn_retry_submit(idn_retry_nego, NULL, token,
2470 			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
2471 		}
2472 	} else {
2473 		IDN_GUNLOCK();
2474 	}
2475 }
2476 
2477 static int
idn_send_con(int domid,idn_msgtype_t * mtp,idn_con_t contype,domainset_t conset)2478 idn_send_con(int domid, idn_msgtype_t *mtp, idn_con_t contype, domainset_t
2479     conset)
2480 {
2481 	idn_msgtype_t	mt;
2482 	uint_t		acknack;
2483 	procname_t	proc = "idn_send_con";
2484 
2485 	ASSERT(IDN_SYNC_IS_LOCKED());
2486 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2487 
2488 	if (mtp) {
2489 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
2490 		mt.mt_mtype = mtp->mt_mtype;
2491 		mt.mt_atype = mtp->mt_atype;
2492 		mt.mt_cookie = mtp->mt_cookie;
2493 	} else {
2494 		acknack = 0;
2495 		mt.mt_mtype = IDNP_CON;
2496 		mt.mt_atype = 0;
2497 		/*
2498 		 * For simple CON queries we want a unique
2499 		 * timer assigned.  For others, they
2500 		 * effectively share one.
2501 		 */
2502 		if (contype == IDNCON_QUERY)
2503 			mt.mt_cookie = 0;
2504 		else
2505 			mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
2506 	}
2507 
2508 	ASSERT((contype == IDNCON_QUERY) ? idn_domain[domid].dcookie_send : 1);
2509 
2510 	PR_PROTO("%s:%d: sending con%sto (cpu %d) [ct=%s, cs=0x%x]\n",
2511 	    proc, domid,
2512 	    (acknack & IDNP_ACK) ? "+ack " :
2513 	    (acknack & IDNP_NACK) ? "+nack " : " ",
2514 	    idn_domain[domid].dcpu,
2515 	    idncon_str[contype], conset);
2516 
2517 	IDN_MSGTIMER_START(domid, IDNP_CON, (ushort_t)contype,
2518 	    idn_msg_waittime[IDNP_CON], &mt.mt_cookie);
2519 
2520 	IDNXDC(domid, &mt, (uint_t)contype, (uint_t)conset, 0, 0);
2521 
2522 	return (0);
2523 }
2524 
2525 /*
2526  * Must leave w/DLOCK dropped and SYNC_LOCK held.
2527  */
2528 static int
idn_recv_con(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2529 idn_recv_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2530 {
2531 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2532 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
2533 	idn_con_t	contype;
2534 	domainset_t	my_ready_set, ready_set;
2535 	idn_msgtype_t	mt;
2536 	idn_domain_t	*dp = &idn_domain[domid];
2537 	idn_xdcargs_t	aargs;
2538 	procname_t	proc = "idn_recv_con";
2539 
2540 	ASSERT(IDN_SYNC_IS_LOCKED());
2541 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2542 
2543 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2544 
2545 	contype   = GET_XARGS_CON_TYPE(xargs);
2546 	ready_set = GET_XARGS_CON_DOMSET(xargs);
2547 
2548 	CLR_XARGS(aargs);
2549 
2550 	if (!(msg & IDNP_NACK) && (contype == IDNCON_QUERY)) {
2551 		domainset_t	query_set;
2552 
2553 		query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
2554 		    ready_set, IDNSYNC_REG_REG);
2555 
2556 		my_ready_set = idn.domset.ds_connected | idn.domset.ds_ready_on;
2557 		my_ready_set &= ~idn.domset.ds_trans_off;
2558 		DOMAINSET_ADD(my_ready_set, idn.localid);
2559 
2560 		if (msg & IDNP_MSGTYPE_MASK) {
2561 			mt.mt_mtype = IDNP_ACK;
2562 			mt.mt_atype = IDNP_CON;
2563 			SET_XARGS_CON_TYPE(aargs, contype);
2564 			SET_XARGS_CON_DOMSET(aargs, my_ready_set);
2565 			idn_send_acknack(domid, &mt, aargs);
2566 		}
2567 
2568 		if (query_set) {
2569 			uint_t	token;
2570 
2571 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
2572 			idn_retry_submit(idn_retry_query, NULL, token,
2573 			    idn_msg_retrytime[(int)IDNRETRY_CONQ]);
2574 		}
2575 
2576 		return (0);
2577 	}
2578 
2579 	if (dp->dxp == NULL) {
2580 		STRING(mstr);
2581 		STRING(lstr);
2582 		/*
2583 		 * Must have received an inappropriate error
2584 		 * message as we should already be registered
2585 		 * by the time we reach here.
2586 		 */
2587 		INUM2STR(msg, mstr);
2588 		INUM2STR(msgarg, lstr);
2589 
2590 		PR_PROTO("%s:%d: ERROR: NOT YET REGISTERED (%s/%s)\n",
2591 		    proc, domid, mstr, lstr);
2592 
2593 		if (msg & IDNP_MSGTYPE_MASK) {
2594 			mt.mt_mtype = IDNP_NACK;
2595 			mt.mt_atype = msg;
2596 			SET_XARGS_NACK_TYPE(aargs, IDNNACK_RETRY);
2597 			idn_send_acknack(domid, &mt, aargs);
2598 		}
2599 
2600 		return (-1);
2601 	}
2602 
2603 	(void) idn_xphase_transition(domid, mtp, xargs);
2604 
2605 	return (0);
2606 }
2607 
2608 /*ARGSUSED1*/
2609 static void
idn_retry_con(uint_t token,void * arg)2610 idn_retry_con(uint_t token, void *arg)
2611 {
2612 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
2613 	idn_domain_t	*dp = &idn_domain[domid];
2614 	idn_xdcargs_t	xargs;
2615 	procname_t	proc = "idn_retry_con";
2616 
2617 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_CON);
2618 
2619 	IDN_SYNC_LOCK();
2620 	IDN_DLOCK_EXCL(domid);
2621 
2622 	if (dp->dxp != &xphase_con) {
2623 		STRING(str);
2624 
2625 #ifdef DEBUG
2626 		if (dp->dxp) {
2627 			INUM2STR(dp->dxp->xt_msgtype, str);
2628 		}
2629 #endif /* DEBUG */
2630 
2631 		PR_PROTO("%s:%d: dxp(%s) != CON...bailing...\n",
2632 		    proc, domid, dp->dxp ? str : "NULL");
2633 		IDN_DUNLOCK(domid);
2634 		IDN_SYNC_UNLOCK();
2635 		return;
2636 	}
2637 
2638 	if ((dp->dsync.s_cmd != IDNSYNC_CONNECT) ||
2639 	    (dp->dxstate != IDNXS_PEND)) {
2640 		PR_PROTO("%s:%d: cmd (%s) and/or xstate (%s) not "
2641 		    "expected (%s/%s)\n",
2642 		    proc, domid, idnsync_str[dp->dsync.s_cmd],
2643 		    idnxs_str[dp->dxstate], idnsync_str[IDNSYNC_CONNECT],
2644 		    idnxs_str[IDNXS_PEND]);
2645 		IDN_DUNLOCK(domid);
2646 		IDN_SYNC_UNLOCK();
2647 		return;
2648 	}
2649 
2650 	(void) idn_xphase_transition(domid, NULL, xargs);
2651 
2652 	IDN_DUNLOCK(domid);
2653 	IDN_SYNC_UNLOCK();
2654 }
2655 
2656 static int
idn_check_con(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2657 idn_check_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2658 {
2659 	int		ready;
2660 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2661 	idn_domain_t	*dp = &idn_domain[domid];
2662 	domainset_t	ready_set, my_ready_set, query_set;
2663 
2664 	ASSERT(IDN_SYNC_IS_LOCKED());
2665 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2666 
2667 	if (msg & IDNP_NACK)
2668 		return (0);
2669 
2670 	if ((dp->dstate == IDNDS_CON_PEND) &&
2671 	    (msg & IDNP_MSGTYPE_MASK) && (msg & IDNP_ACK))	/* con+ack */
2672 		return (1);
2673 
2674 	if (msg == 0) {
2675 		ready_set = idn.domset.ds_connected &
2676 		    ~idn.domset.ds_trans_off;
2677 	} else {
2678 		ready_set = GET_XARGS_CON_DOMSET(xargs);
2679 		DOMAINSET_ADD(idn.domset.ds_ready_on, domid);
2680 	}
2681 
2682 	DOMAINSET_ADD(ready_set, idn.localid);
2683 
2684 	query_set = idn_sync_register(domid, IDNSYNC_CONNECT,
2685 	    ready_set, IDNSYNC_REG_REG);
2686 	/*
2687 	 * No need to query this domain as he's already
2688 	 * in the CON sequence.
2689 	 */
2690 	DOMAINSET_DEL(query_set, domid);
2691 
2692 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
2693 	if (ready) {
2694 		DOMAINSET_DEL(idn.domset.ds_ready_on, domid);
2695 		DOMAINSET_ADD(idn.domset.ds_connected, domid);
2696 	}
2697 
2698 	if (query_set) {
2699 		int	d;
2700 
2701 		my_ready_set = idn.domset.ds_ready_on |
2702 		    idn.domset.ds_connected;
2703 		my_ready_set &= ~idn.domset.ds_trans_off;
2704 		DOMAINSET_ADD(my_ready_set, idn.localid);
2705 
2706 		for (d = 0; d < MAX_DOMAINS; d++) {
2707 			if (!DOMAIN_IN_SET(query_set, d))
2708 				continue;
2709 
2710 			dp = &idn_domain[d];
2711 
2712 			IDN_DLOCK_EXCL(d);
2713 			if ((dp->dsync.s_cmd == IDNSYNC_CONNECT) ||
2714 			    !dp->dcookie_send) {
2715 				IDN_DUNLOCK(d);
2716 				continue;
2717 			}
2718 
2719 			IDN_SYNC_QUERY_UPDATE(domid, d);
2720 
2721 			(void) idn_send_con(d, NULL, IDNCON_QUERY,
2722 			    my_ready_set);
2723 			IDN_DUNLOCK(d);
2724 		}
2725 	}
2726 
2727 	return (!msg ? 0 : (ready ? 0 : 1));
2728 }
2729 
2730 /*ARGSUSED2*/
2731 static void
idn_error_con(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2732 idn_error_con(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2733 {
2734 	uint_t	token;
2735 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2736 
2737 	ASSERT(IDN_SYNC_IS_LOCKED());
2738 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2739 
2740 	if (msg & IDNP_MSGTYPE_MASK) {
2741 		idn_msgtype_t	mt;
2742 		idn_xdcargs_t	nargs;
2743 
2744 		mt.mt_mtype = IDNP_NACK;
2745 		mt.mt_atype = msg;
2746 		mt.mt_cookie = mtp->mt_cookie;
2747 		CLR_XARGS(nargs);
2748 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
2749 		idn_send_acknack(domid, &mt, nargs);
2750 	}
2751 
2752 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2753 	idn_retry_submit(idn_retry_con, NULL, token,
2754 	    idn_msg_retrytime[(int)IDNRETRY_CON]);
2755 }
2756 
2757 /*ARGSUSED*/
2758 static void
idn_action_con_pend(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2759 idn_action_con_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2760 {
2761 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2762 	idn_domain_t	*dp = &idn_domain[domid];
2763 	idn_msgtype_t	mt;
2764 	domainset_t	my_ready_set;
2765 
2766 	ASSERT(IDN_SYNC_IS_LOCKED());
2767 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2768 
2769 	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_on |
2770 	    idn.domset.ds_connected;
2771 	my_ready_set &= ~idn.domset.ds_trans_off;
2772 	DOMAINSET_ADD(my_ready_set, idn.localid);
2773 
2774 	if (!msg) {
2775 		(void) idn_send_con(domid, NULL, IDNCON_NORMAL, my_ready_set);
2776 	} else {
2777 		mt.mt_mtype = IDNP_CON | IDNP_ACK;
2778 		mt.mt_atype = 0;
2779 		mt.mt_cookie = mtp->mt_cookie;
2780 		(void) idn_send_con(domid, &mt, IDNCON_NORMAL, my_ready_set);
2781 	}
2782 }
2783 
2784 static void
idn_action_con_sent(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2785 idn_action_con_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2786 {
2787 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
2788 	idn_domain_t	*dp = &idn_domain[domid];
2789 	idn_con_t	contype;
2790 	domainset_t	my_ready_set;
2791 	idn_msgtype_t	mt;
2792 
2793 	ASSERT(IDN_SYNC_IS_LOCKED());
2794 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2795 
2796 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
2797 
2798 	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_on |
2799 	    idn.domset.ds_connected;
2800 	my_ready_set &= ~idn.domset.ds_trans_off;
2801 	DOMAINSET_ADD(my_ready_set, idn.localid);
2802 
2803 	contype = GET_XARGS_CON_TYPE(xargs);
2804 
2805 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
2806 		/*
2807 		 * con
2808 		 */
2809 		mt.mt_mtype = IDNP_CON | IDNP_ACK;
2810 		mt.mt_atype = 0;
2811 		(void) idn_send_con(domid, &mt, contype, my_ready_set);
2812 	} else if (msg & IDNP_MSGTYPE_MASK) {
2813 		idn_xdcargs_t	cargs;
2814 
2815 		mt.mt_mtype = IDNP_ACK;
2816 		mt.mt_atype = msg;
2817 		CLR_XARGS(cargs);
2818 		SET_XARGS_CON_TYPE(cargs, contype);
2819 		SET_XARGS_CON_DOMSET(cargs, my_ready_set);
2820 		/*
2821 		 * con+ack
2822 		 */
2823 		idn_send_acknack(domid, &mt, cargs);
2824 	} else {
2825 		uint_t	token;
2826 		/*
2827 		 * nack - retry
2828 		 */
2829 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2830 		idn_retry_submit(idn_retry_con, NULL, token,
2831 		    idn_msg_retrytime[(int)IDNRETRY_CON]);
2832 	}
2833 }
2834 
2835 /*ARGSUSED*/
2836 static void
idn_action_con_rcvd(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)2837 idn_action_con_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
2838 {
2839 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
2840 
2841 	ASSERT(IDN_SYNC_IS_LOCKED());
2842 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2843 
2844 	if (msg & IDNP_NACK) {
2845 		uint_t	token;
2846 		/*
2847 		 * nack - retry
2848 		 */
2849 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2850 		idn_retry_submit(idn_retry_con, NULL, token,
2851 		    idn_msg_retrytime[(int)IDNRETRY_CON]);
2852 	}
2853 }
2854 
2855 static void
idn_final_con(int domid)2856 idn_final_con(int domid)
2857 {
2858 	uint_t		targ;
2859 	uint_t		token = IDN_RETRY_TOKEN(domid, IDNRETRY_CON);
2860 	idn_domain_t	*dp = &idn_domain[domid];
2861 	procname_t	proc = "idn_final_con";
2862 
2863 	ASSERT(IDN_SYNC_IS_LOCKED());
2864 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2865 
2866 	(void) idn_retry_terminate(token);
2867 
2868 	dp->dxp = NULL;
2869 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
2870 
2871 	idn_sync_exit(domid, IDNSYNC_CONNECT);
2872 
2873 	CHECKPOINT_OPENED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 1);
2874 
2875 	DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
2876 	DOMAINSET_DEL(idn.domset.ds_relink, domid);
2877 	IDN_FSTATE_TRANSITION(dp, IDNFIN_OFF);
2878 
2879 	PR_PROTO("%s:%d: CONNECTED\n", proc, domid);
2880 
2881 	if (idn.domset.ds_trans_on == 0) {
2882 		if ((idn.domset.ds_trans_off | idn.domset.ds_relink) == 0) {
2883 			PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
2884 			    proc, domid, idn.domset.ds_hitlist);
2885 			idn.domset.ds_hitlist = 0;
2886 		}
2887 		PR_PROTO("%s:%d: ALL CONNECTED ************ "
2888 		    "(0x%x + 0x%x) = 0x%x\n", proc, domid,
2889 		    DOMAINSET(idn.localid), idn.domset.ds_connected,
2890 		    DOMAINSET(idn.localid) | idn.domset.ds_connected);
2891 	} else {
2892 		PR_PROTO("%s:%d: >>> ds_trans_on = 0x%x, ds_ready_on = 0x%x\n",
2893 		    proc, domid,
2894 		    idn.domset.ds_trans_on, idn.domset.ds_ready_on);
2895 	}
2896 
2897 	if (idn_verify_config_mbox(domid)) {
2898 		idnsb_error_t	idnerr;
2899 		/*
2900 		 * Mailbox is not cool. Need to disconnect.
2901 		 */
2902 		INIT_IDNKERR(&idnerr);
2903 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
2904 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_SMR_CORRUPTED);
2905 		SET_IDNKERR_PARAM0(&idnerr, domid);
2906 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
2907 		/*
2908 		 * We cannot disconnect from an individual domain
2909 		 * unless all domains are attempting to disconnect
2910 		 * from him also, especially now since we touched
2911 		 * the SMR and now we have a potential cache conflicts
2912 		 * with the other domains with respect to this
2913 		 * domain.  Disconnect attempt will effectively
2914 		 * shutdown connection with respective domain
2915 		 * which is the effect we really want anyway.
2916 		 */
2917 		(void) idn_disconnect(domid, IDNFIN_NORMAL, IDNFIN_ARG_SMRBAD,
2918 		    IDNFIN_SYNC_YES);
2919 
2920 		return;
2921 	}
2922 
2923 	if (lock_try(&idn.first_swlink)) {
2924 		/*
2925 		 * This is our first connection.  Need to
2926 		 * kick some stuff into gear.
2927 		 */
2928 		idndl_dlpi_init();
2929 		(void) idn_activate_channel(CHANSET_ALL, IDNCHAN_ONLINE);
2930 
2931 		targ = 0xf0;
2932 	} else {
2933 		targ = 0;
2934 	}
2935 
2936 	idn_mainmbox_activate(domid);
2937 
2938 	idn_update_op(IDNOP_CONNECTED, DOMAINSET(domid), NULL);
2939 
2940 	IDN_GKSTAT_GLOBAL_EVENT(gk_links, gk_link_last);
2941 
2942 	membar_stst_ldst();
2943 
2944 	IDN_DSTATE_TRANSITION(dp, IDNDS_CONNECTED);
2945 	/*
2946 	 * Need to kick off initial commands in background.
2947 	 * We do not want to do them within the context of
2948 	 * a protocol server because they may sleep and thus
2949 	 * cause the protocol server to incur a soft-deadlock,
2950 	 * i.e. he's sleeping waiting in the slab-waiting area
2951 	 * for a response that will arrive on his protojob
2952 	 * queue, but which he obviously can't process since
2953 	 * he's not waiting on his protojob queue.
2954 	 */
2955 	targ |= domid & 0x0f;
2956 	(void) timeout(idn_link_established, (void *)(uintptr_t)targ, 50);
2957 
2958 	cmn_err(CE_NOTE,
2959 	    "!IDN: 200: link (domain %d, CPU %d) connected",
2960 	    dp->domid, dp->dcpu);
2961 }
2962 
2963 static void
idn_exit_con(int domid,uint_t msgtype)2964 idn_exit_con(int domid, uint_t msgtype)
2965 {
2966 	idn_domain_t	*dp = &idn_domain[domid];
2967 	idn_fin_t	fintype;
2968 	procname_t	proc = "idn_exit_con";
2969 	STRING(str);
2970 
2971 	ASSERT(IDN_SYNC_IS_LOCKED());
2972 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
2973 
2974 	INUM2STR(msgtype, str);
2975 	PR_PROTO("%s:%d: msgtype = 0x%x(%s)\n", proc, domid, msgtype, str);
2976 
2977 	fintype = msgtype ? IDNFIN_NORMAL : IDNFIN_FORCE_HARD;
2978 
2979 	IDN_GLOCK_SHARED();
2980 	if (idn.state != IDNGS_DISCONNECT) {
2981 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
2982 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
2983 		    idn.domset.ds_relink);
2984 	} else {
2985 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
2986 	}
2987 	IDN_GUNLOCK();
2988 
2989 	(void) idn_disconnect(domid, fintype, IDNFIN_ARG_NONE,
2990 	    IDNDS_SYNC_TYPE(dp));
2991 }
2992 
2993 static int
idn_send_fin(int domid,idn_msgtype_t * mtp,idn_fin_t fintype,idn_finarg_t finarg,idn_finopt_t finopt,domainset_t finset,uint_t finmaster)2994 idn_send_fin(int domid, idn_msgtype_t *mtp, idn_fin_t fintype, idn_finarg_t
2995     finarg, idn_finopt_t finopt, domainset_t finset, uint_t finmaster)
2996 {
2997 	int		need_timer = 1;
2998 	uint_t		acknack;
2999 	uint_t		fintypearg = 0;
3000 	idn_msgtype_t	mt;
3001 	idn_domain_t	*dp = &idn_domain[domid];
3002 	procname_t	proc = "idn_send_fin";
3003 
3004 	ASSERT(IDN_SYNC_IS_LOCKED());
3005 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3006 
3007 	ASSERT((fintype != IDNFIN_QUERY) ? (finopt != IDNFIN_OPT_NONE) : 1);
3008 
3009 	if (mtp) {
3010 		acknack = mtp->mt_mtype & IDNP_ACKNACK_MASK;
3011 		mt.mt_mtype = mtp->mt_mtype;
3012 		mt.mt_atype = mtp->mt_atype;
3013 		mt.mt_cookie = mtp->mt_cookie;
3014 	} else {
3015 		acknack = 0;
3016 		mt.mt_mtype = IDNP_FIN;
3017 		mt.mt_atype = 0;
3018 		/*
3019 		 * For simple FIN queries we want a unique
3020 		 * timer assigned.  For others, they
3021 		 * effectively share one.
3022 		 */
3023 		if (fintype == IDNFIN_QUERY)
3024 			mt.mt_cookie = 0;
3025 		else
3026 			mt.mt_cookie = IDN_TIMER_PUBLIC_COOKIE;
3027 	}
3028 
3029 	PR_PROTO("%s:%d: sending fin%sto (cpu %d) "
3030 	    "[ft=%s, fa=%s, fs=0x%x, fo=%s, fm=(%d,%d)]\n",
3031 	    proc, domid,
3032 	    (acknack & IDNP_ACK) ? "+ack " :
3033 	    (acknack & IDNP_NACK) ? "+nack " : " ",
3034 	    dp->dcpu, idnfin_str[fintype], idnfinarg_str[finarg],
3035 	    (int)finset, idnfinopt_str[finopt],
3036 	    FIN_MASTER_DOMID(finmaster), FIN_MASTER_CPUID(finmaster));
3037 
3038 	if (need_timer) {
3039 		IDN_MSGTIMER_START(domid, IDNP_FIN, (ushort_t)fintype,
3040 		    idn_msg_waittime[IDNP_FIN], &mt.mt_cookie);
3041 	}
3042 
3043 	SET_FIN_TYPE(fintypearg, fintype);
3044 	SET_FIN_ARG(fintypearg, finarg);
3045 
3046 	IDNXDC(domid, &mt, fintypearg, (uint_t)finset, (uint_t)finopt,
3047 	    finmaster);
3048 
3049 	return (0);
3050 }
3051 
3052 /*
3053  * Must leave w/DLOCK dropped and SYNC_LOCK held.
3054  */
3055 static int
idn_recv_fin(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3056 idn_recv_fin(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3057 {
3058 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3059 	idn_fin_t	fintype;
3060 	idn_finarg_t	finarg;
3061 	idn_finopt_t	finopt;
3062 	domainset_t	my_ready_set, ready_set;
3063 	idn_msgtype_t	mt;
3064 	idn_domain_t	*dp = &idn_domain[domid];
3065 	idn_xdcargs_t	aargs;
3066 	procname_t	proc = "idn_recv_fin";
3067 
3068 	ASSERT(IDN_SYNC_IS_LOCKED());
3069 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3070 
3071 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3072 
3073 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3074 	finarg    = GET_XARGS_FIN_ARG(xargs);
3075 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3076 	finopt    = GET_XARGS_FIN_OPT(xargs);
3077 
3078 	CLR_XARGS(aargs);
3079 
3080 	if (msg & IDNP_NACK) {
3081 		PR_PROTO("%s:%d: received NACK (type = %s)\n",
3082 		    proc, domid, idnnack_str[xargs[0]]);
3083 	} else {
3084 		PR_PROTO("%s:%d: fintype = %s, finopt = %s, "
3085 		    "finarg = %s, ready_set = 0x%x\n",
3086 		    proc, domid, idnfin_str[fintype],
3087 		    idnfinopt_str[finopt],
3088 		    idnfinarg_str[finarg], ready_set);
3089 	}
3090 
3091 	if (!(msg & IDNP_NACK) && (fintype == IDNFIN_QUERY)) {
3092 		domainset_t	query_set;
3093 
3094 		query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
3095 		    ready_set, IDNSYNC_REG_REG);
3096 
3097 		my_ready_set = ~idn.domset.ds_connected |
3098 		    idn.domset.ds_ready_off;
3099 
3100 		if (msg & IDNP_MSGTYPE_MASK) {
3101 			mt.mt_mtype = IDNP_ACK;
3102 			mt.mt_atype = IDNP_FIN;
3103 			SET_XARGS_FIN_TYPE(aargs, fintype);
3104 			SET_XARGS_FIN_ARG(aargs, finarg);
3105 			SET_XARGS_FIN_DOMSET(aargs, my_ready_set);
3106 			SET_XARGS_FIN_OPT(aargs, IDNFIN_OPT_NONE);
3107 			SET_XARGS_FIN_MASTER(aargs, NIL_FIN_MASTER);
3108 			idn_send_acknack(domid, &mt, aargs);
3109 		}
3110 
3111 		if (query_set) {
3112 			uint_t	token;
3113 
3114 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
3115 			idn_retry_submit(idn_retry_query, NULL, token,
3116 			    idn_msg_retrytime[(int)IDNRETRY_FINQ]);
3117 		}
3118 
3119 		return (0);
3120 	}
3121 
3122 	if (dp->dxp != &xphase_fin) {
3123 		uint_t	token;
3124 
3125 		if (IDNDS_IS_CLOSED(dp)) {
3126 			PR_PROTO("%s:%d: domain already closed (%s)\n",
3127 			    proc, domid, idnds_str[dp->dstate]);
3128 			if (msg & IDNP_MSGTYPE_MASK) {
3129 				/*
3130 				 * fin or fin+ack.
3131 				 */
3132 				mt.mt_mtype = IDNP_NACK;
3133 				mt.mt_atype = msg;
3134 				SET_XARGS_NACK_TYPE(aargs, IDNNACK_NOCONN);
3135 				idn_send_acknack(domid, &mt, aargs);
3136 			}
3137 			return (0);
3138 		}
3139 		dp->dfin_sync = IDNDS_SYNC_TYPE(dp);
3140 
3141 		/*
3142 		 * Need to do some clean-up ala idn_disconnect().
3143 		 *
3144 		 * Terminate any outstanding commands that were
3145 		 * targeted towards this domain.
3146 		 */
3147 		idn_terminate_cmd(domid, ECANCELED);
3148 
3149 		/*
3150 		 * Terminate any and all retries that may have
3151 		 * outstanding for this domain.
3152 		 */
3153 		token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
3154 		(void) idn_retry_terminate(token);
3155 
3156 		/*
3157 		 * Stop all outstanding message timers for
3158 		 * this guy.
3159 		 */
3160 		IDN_MSGTIMER_STOP(domid, 0, 0);
3161 
3162 		dp->dxp = &xphase_fin;
3163 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
3164 	}
3165 
3166 	if (msg & IDNP_NACK) {
3167 		idn_nack_t	nack;
3168 
3169 		nack = GET_XARGS_NACK_TYPE(xargs);
3170 		if (nack == IDNNACK_NOCONN) {
3171 			/*
3172 			 * We're trying to FIN with somebody we're
3173 			 * already disconnected from.  Need to
3174 			 * speed this guy through.
3175 			 */
3176 			DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3177 			(void) idn_sync_register(domid, IDNSYNC_DISCONNECT,
3178 			    DOMAINSET_ALL, IDNSYNC_REG_REG);
3179 			ready_set = (uint_t)DOMAINSET_ALL;
3180 			/*
3181 			 * Need to transform message to allow us to
3182 			 * pass this guy right through and not waste time
3183 			 * talking to him.
3184 			 */
3185 			IDN_FSTATE_TRANSITION(dp, IDNFIN_FORCE_HARD);
3186 
3187 			switch (dp->dstate) {
3188 			case IDNDS_FIN_PEND:
3189 				mtp->mt_mtype = 0;
3190 				mtp->mt_atype = 0;
3191 				break;
3192 
3193 			case IDNDS_FIN_SENT:
3194 				mtp->mt_mtype = IDNP_FIN | IDNP_ACK;
3195 				mtp->mt_atype = 0;
3196 				break;
3197 
3198 			case IDNDS_FIN_RCVD:
3199 				mtp->mt_mtype = IDNP_ACK;
3200 				mtp->mt_atype = IDNP_FIN | IDNP_ACK;
3201 				break;
3202 
3203 			default:
3204 #ifdef DEBUG
3205 				cmn_err(CE_PANIC,
3206 				    "%s:%d: UNEXPECTED state = %s",
3207 				    proc, domid,
3208 				    idnds_str[dp->dstate]);
3209 #endif /* DEBUG */
3210 				break;
3211 			}
3212 		}
3213 		fintype = (uint_t)dp->dfin;
3214 		finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3215 		    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3216 
3217 		CLR_XARGS(xargs);
3218 		SET_XARGS_FIN_TYPE(xargs, fintype);
3219 		SET_XARGS_FIN_ARG(xargs, finarg);
3220 		SET_XARGS_FIN_DOMSET(xargs, ready_set);
3221 		SET_XARGS_FIN_OPT(xargs, finopt);
3222 		SET_XARGS_FIN_MASTER(xargs, NIL_FIN_MASTER);
3223 	}
3224 
3225 	(void) idn_xphase_transition(domid, mtp, xargs);
3226 
3227 	return (0);
3228 }
3229 
3230 /*ARGSUSED1*/
3231 static void
idn_retry_fin(uint_t token,void * arg)3232 idn_retry_fin(uint_t token, void *arg)
3233 {
3234 	int		domid = IDN_RETRY_TOKEN2DOMID(token);
3235 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3236 	uint_t		finmaster;
3237 	idn_domain_t	*dp = &idn_domain[domid];
3238 	idn_xdcargs_t	xargs;
3239 	idn_finopt_t	finopt;
3240 	procname_t	proc = "idn_retry_fin";
3241 
3242 	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_FIN);
3243 
3244 	IDN_SYNC_LOCK();
3245 	IDN_DLOCK_EXCL(domid);
3246 
3247 	if (dp->dxp != &xphase_fin) {
3248 		PR_PROTO("%s:%d: dxp(0x%p) != xstate_fin(0x%p)...bailing\n",
3249 		    proc, domid, (void *)dp->dxp, (void *)&xphase_fin);
3250 		IDN_DUNLOCK(domid);
3251 		IDN_SYNC_UNLOCK();
3252 		return;
3253 	}
3254 
3255 	if (dp->dxstate != IDNXS_PEND) {
3256 		PR_PROTO("%s:%d: xstate(%s) != %s...bailing\n",
3257 		    proc, domid, idnxs_str[dp->dxstate],
3258 		    idnxs_str[IDNXS_PEND]);
3259 		IDN_DUNLOCK(domid);
3260 		IDN_SYNC_UNLOCK();
3261 		return;
3262 	}
3263 
3264 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3265 	    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3266 
3267 	CLR_XARGS(xargs);
3268 	SET_XARGS_FIN_TYPE(xargs, dp->dfin);
3269 	/*LINTED*/
3270 	SET_XARGS_FIN_ARG(xargs, IDNFIN_ARG_NONE);
3271 	SET_XARGS_FIN_OPT(xargs, finopt);
3272 	SET_XARGS_FIN_DOMSET(xargs, 0);		/* unused when msg == 0 */
3273 	IDN_GLOCK_SHARED();
3274 	new_masterid = IDN_GET_NEW_MASTERID();
3275 	IDN_GUNLOCK();
3276 	if (new_masterid != IDN_NIL_DOMID)
3277 		new_cpuid = idn_domain[new_masterid].dcpu;
3278 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3279 	SET_XARGS_FIN_MASTER(xargs, finmaster);
3280 
3281 	(void) idn_xphase_transition(domid, NULL, xargs);
3282 
3283 	IDN_DUNLOCK(domid);
3284 	IDN_SYNC_UNLOCK();
3285 }
3286 
3287 static int
idn_check_fin_pend(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3288 idn_check_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3289 {
3290 	idn_domain_t	*dp = &idn_domain[domid];
3291 	idn_fin_t	fintype;
3292 	idn_finopt_t	finopt;
3293 	idn_finarg_t	finarg;
3294 	int		ready;
3295 	int		finmasterid;
3296 	int		fincpuid;
3297 	uint_t		finmaster;
3298 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3299 	domainset_t	query_set, ready_set, conn_set;
3300 	domainset_t	my_ready_set, shutdown_set;
3301 	procname_t	proc = "idn_check_fin_pend";
3302 
3303 	ASSERT(IDN_SYNC_IS_LOCKED());
3304 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3305 
3306 	if (msg & IDNP_NACK)
3307 		return (0);
3308 
3309 	if ((dp->dstate == IDNDS_FIN_PEND) && (msg & IDNP_MSGTYPE_MASK) &&
3310 	    (msg & IDNP_ACK))		/* fin+ack */
3311 		return (1);
3312 
3313 	query_set = 0;
3314 
3315 	if (!DOMAIN_IN_SET(idn.domset.ds_trans_off, domid)) {
3316 		/*
3317 		 * Can't remove domain from ds_connected yet,
3318 		 * since he's still officially connected until
3319 		 * we get an ACK from him.
3320 		 */
3321 		DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
3322 		DOMAINSET_ADD(idn.domset.ds_trans_off, domid);
3323 	}
3324 
3325 	IDN_GLOCK_SHARED();
3326 	conn_set = (idn.domset.ds_connected | idn.domset.ds_trans_on) &
3327 	    ~idn.domset.ds_trans_off;
3328 	if ((idn.state == IDNGS_DISCONNECT) ||
3329 	    (idn.state == IDNGS_RECONFIG) ||
3330 	    (domid == IDN_GET_MASTERID()) || !conn_set) {
3331 		/*
3332 		 * If we're disconnecting, reconfiguring,
3333 		 * unlinking from the master, or unlinking
3334 		 * the last of our connections, then we need
3335 		 * to shutdown all the channels.
3336 		 */
3337 		shutdown_set = DOMAINSET_ALL;
3338 	} else {
3339 		shutdown_set = DOMAINSET(domid);
3340 	}
3341 	IDN_GUNLOCK();
3342 
3343 	idn_shutdown_datapath(shutdown_set, (dp->dfin == IDNFIN_FORCE_HARD));
3344 
3345 	IDN_GLOCK_EXCL();
3346 	/*
3347 	 * Remap the SMR back to our local space if the remote
3348 	 * domain going down is the master.  We do this now before
3349 	 * flushing caches.  This will help guarantee that any
3350 	 * accidental accesses to the SMR after the cache flush
3351 	 * will only go to local memory.
3352 	 */
3353 	if ((domid == IDN_GET_MASTERID()) && (idn.smr.rempfn != PFN_INVALID)) {
3354 		PR_PROTO("%s:%d: deconfiging CURRENT MASTER - SMR remap\n",
3355 		    proc, domid);
3356 		IDN_DLOCK_EXCL(idn.localid);
3357 		/*
3358 		 * We're going to remap the SMR,
3359 		 * so gotta blow away our local
3360 		 * pointer to the mbox table.
3361 		 */
3362 		idn_domain[idn.localid].dmbox.m_tbl = NULL;
3363 		IDN_DUNLOCK(idn.localid);
3364 
3365 		idn.smr.rempfn = PFN_INVALID;
3366 		idn.smr.rempfnlim = PFN_INVALID;
3367 
3368 		smr_remap(&kas, idn.smr.vaddr, idn.smr.locpfn, IDN_SMR_SIZE);
3369 	}
3370 	IDN_GUNLOCK();
3371 
3372 	if (DOMAIN_IN_SET(idn.domset.ds_flush, domid)) {
3373 		idnxf_flushall_ecache();
3374 		CHECKPOINT_CLOSED(IDNSB_CHKPT_CACHE, dp->dhw.dh_boardset, 2);
3375 		DOMAINSET_DEL(idn.domset.ds_flush, domid);
3376 	}
3377 
3378 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3379 	finarg    = GET_XARGS_FIN_ARG(xargs);
3380 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3381 	finopt    = GET_XARGS_FIN_OPT(xargs);
3382 
3383 	ASSERT(fintype != IDNFIN_QUERY);
3384 	if (!VALID_FIN(fintype)) {
3385 		/*
3386 		 * If for some reason remote domain
3387 		 * sent us an invalid FIN type,
3388 		 * override it to a  NORMAL fin.
3389 		 */
3390 		PR_PROTO("%s:%d: WARNING invalid fintype (%d) -> %s(%d)\n",
3391 		    proc, domid, (int)fintype,
3392 		    idnfin_str[IDNFIN_NORMAL], (int)IDNFIN_NORMAL);
3393 		fintype = IDNFIN_NORMAL;
3394 	}
3395 
3396 	if (!VALID_FINOPT(finopt)) {
3397 		PR_PROTO("%s:%d: WARNING invalid finopt (%d) -> %s(%d)\n",
3398 		    proc, domid, (int)finopt,
3399 		    idnfinopt_str[IDNFIN_OPT_UNLINK],
3400 		    (int)IDNFIN_OPT_UNLINK);
3401 		finopt = IDNFIN_OPT_UNLINK;
3402 	}
3403 
3404 	finmaster = GET_XARGS_FIN_MASTER(xargs);
3405 	finmasterid = FIN_MASTER_DOMID(finmaster);
3406 	fincpuid = FIN_MASTER_CPUID(finmaster);
3407 
3408 	if ((finarg != IDNFIN_ARG_NONE) &&
3409 	    !DOMAIN_IN_SET(idn.domset.ds_hitlist, domid)) {
3410 		idnsb_error_t	idnerr;
3411 
3412 		INIT_IDNKERR(&idnerr);
3413 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
3414 		SET_IDNKERR_IDNERR(&idnerr, FINARG2IDNKERR(finarg));
3415 		SET_IDNKERR_PARAM0(&idnerr, domid);
3416 
3417 		if (IDNFIN_ARG_IS_FATAL(finarg)) {
3418 			finopt = IDNFIN_OPT_UNLINK;
3419 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
3420 			DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
3421 
3422 			if (idn.domset.ds_connected == 0) {
3423 				domainset_t	domset;
3424 
3425 				IDN_GLOCK_EXCL();
3426 				domset = ~idn.domset.ds_relink;
3427 				if (idn.domset.ds_relink == 0) {
3428 					IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
3429 				}
3430 				domset &= ~idn.domset.ds_hitlist;
3431 				/*
3432 				 * The primary domain we were trying to
3433 				 * connect to fin'd us with a fatal argument.
3434 				 * Something isn't cool in our IDN environment,
3435 				 * e.g. corrupted SMR or non-compatible CONFIG
3436 				 * parameters.  In any case we need to dismantle
3437 				 * ourselves completely.
3438 				 */
3439 				IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3440 				IDN_GUNLOCK();
3441 				IDN_DUNLOCK(domid);
3442 
3443 				DOMAINSET_DEL(domset, idn.localid);
3444 				DOMAINSET_DEL(domset, domid);
3445 
3446 				idn_update_op(IDNOP_ERROR, DOMAINSET_ALL,
3447 				    &idnerr);
3448 
3449 				PR_HITLIST("%s:%d: unlink_domainset(%x) "
3450 				    "due to CFG error (relink=%x, "
3451 				    "hitlist=%x)\n", proc, domid, domset,
3452 				    idn.domset.ds_relink,
3453 				    idn.domset.ds_hitlist);
3454 
3455 				idn_unlink_domainset(domset, IDNFIN_NORMAL,
3456 				    finarg, IDNFIN_OPT_UNLINK, BOARDSET_ALL);
3457 				IDN_DLOCK_EXCL(domid);
3458 			}
3459 			PR_HITLIST("%s:%d: CFG error, (conn=%x, relink=%x, "
3460 			    "hitlist=%x)\n",
3461 			    proc, domid, idn.domset.ds_connected,
3462 			    idn.domset.ds_relink, idn.domset.ds_hitlist);
3463 		}
3464 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
3465 	}
3466 
3467 	if ((finmasterid != IDN_NIL_DOMID) && (!VALID_DOMAINID(finmasterid) ||
3468 	    DOMAIN_IN_SET(idn.domset.ds_hitlist, domid))) {
3469 		PR_HITLIST("%s:%d: finmasterid = %d -> -1, relink=%x, "
3470 		    "hitlist=%x\n",
3471 		    proc, domid, finmasterid, idn.domset.ds_relink,
3472 		    idn.domset.ds_hitlist);
3473 		PR_PROTO("%s:%d: WARNING invalid finmasterid (%d) -> -1\n",
3474 		    proc, domid, finmasterid);
3475 		finmasterid = IDN_NIL_DOMID;
3476 	}
3477 
3478 	IDN_GLOCK_EXCL();
3479 
3480 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
3481 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
3482 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
3483 		    idn.domset.ds_relink);
3484 	} else {
3485 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
3486 		DOMAINSET_ADD(idn.domset.ds_hitlist, domid);
3487 	}
3488 
3489 	if ((domid == IDN_GET_NEW_MASTERID()) &&
3490 	    !DOMAIN_IN_SET(idn.domset.ds_relink, domid)) {
3491 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3492 	}
3493 
3494 	if ((idn.state != IDNGS_DISCONNECT) && (idn.state != IDNGS_RECONFIG) &&
3495 	    (domid == IDN_GET_MASTERID())) {
3496 		domainset_t	dis_set, master_candidates;
3497 
3498 		IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs, gk_reconfig_last);
3499 
3500 		IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
3501 		IDN_GUNLOCK();
3502 
3503 		if ((finmasterid != IDN_NIL_DOMID) &&
3504 		    (finmasterid != idn.localid)) {
3505 			if (finmasterid != domid)
3506 				IDN_DLOCK_EXCL(finmasterid);
3507 			if (idn_open_domain(finmasterid, fincpuid, 0) < 0) {
3508 				cmn_err(CE_WARN,
3509 				    "IDN: 205: (%s) failed to "
3510 				    "open-domain(%d,%d)",
3511 				    proc, finmasterid, fincpuid);
3512 				if (finmasterid != domid)
3513 					IDN_DUNLOCK(finmasterid);
3514 				finmasterid = IDN_NIL_DOMID;
3515 			}
3516 			if (finmasterid != domid)
3517 				IDN_DUNLOCK(finmasterid);
3518 		}
3519 
3520 		IDN_GLOCK_EXCL();
3521 		if (finmasterid == IDN_NIL_DOMID) {
3522 			int	m;
3523 
3524 			master_candidates = idn.domset.ds_trans_on |
3525 			    idn.domset.ds_connected |
3526 			    idn.domset.ds_relink;
3527 			master_candidates &= ~(idn.domset.ds_trans_off &
3528 			    ~idn.domset.ds_relink);
3529 			DOMAINSET_DEL(master_candidates, domid);
3530 			/*
3531 			 * Local domain gets to participate also.
3532 			 */
3533 			DOMAINSET_ADD(master_candidates, idn.localid);
3534 
3535 			m = idn_select_candidate(master_candidates);
3536 			IDN_SET_NEW_MASTERID(m);
3537 		} else {
3538 			IDN_SET_NEW_MASTERID(finmasterid);
3539 		}
3540 		IDN_GUNLOCK();
3541 
3542 		dis_set = idn.domset.ds_trans_on | idn.domset.ds_connected;
3543 		DOMAINSET_DEL(dis_set, domid);
3544 
3545 		idn_unlink_domainset(dis_set, IDNFIN_NORMAL, IDNFIN_ARG_NONE,
3546 		    IDNFIN_OPT_RELINK, BOARDSET_ALL);
3547 	} else {
3548 		IDN_GUNLOCK();
3549 	}
3550 
3551 	/*
3552 	 * My local ready-set are those domains from which I
3553 	 * have confirmed no datapaths exist.
3554 	 */
3555 	my_ready_set = ~idn.domset.ds_connected;
3556 
3557 	switch (dp->dfin) {
3558 	case IDNFIN_NORMAL:
3559 	case IDNFIN_FORCE_SOFT:
3560 	case IDNFIN_FORCE_HARD:
3561 		if (fintype < dp->dfin) {
3562 			/*
3563 			 * Remote domain has requested a
3564 			 * FIN of lower priority than what
3565 			 * we're currently running.  Just
3566 			 * leave the priority where it is.
3567 			 */
3568 			break;
3569 		}
3570 		/*FALLTHROUGH*/
3571 
3572 	default:
3573 		IDN_FSTATE_TRANSITION(dp, fintype);
3574 		break;
3575 	}
3576 
3577 	ASSERT(dp->dfin_sync != IDNFIN_SYNC_OFF);
3578 
3579 	if (msg == 0) {
3580 		/*
3581 		 * Local domain is initiating a FIN sequence
3582 		 * to remote domid.  Note that remote domain
3583 		 * remains in ds_connected even though he's
3584 		 * in thet ready-set from the local domain's
3585 		 * perspective.  We can't remove him from
3586 		 * ds_connected until we get a confirmed message
3587 		 * from him indicating he has ceased communication.
3588 		 */
3589 		ready_set = my_ready_set;
3590 	} else {
3591 		/*
3592 		 * Remote domain initiated a FIN sequence
3593 		 * to local domain.  This implies that he
3594 		 * has shutdown his datapath to us.  Since
3595 		 * we shutdown our datapath to him, we're
3596 		 * effectively now in his ready-set.
3597 		 */
3598 		DOMAINSET_ADD(ready_set, idn.localid);
3599 		/*
3600 		 * Since we know both sides of the connection
3601 		 * have ceased, this remote domain is effectively
3602 		 * considered disconnected.
3603 		 */
3604 		DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3605 	}
3606 
3607 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3608 		/*
3609 		 * If we're doing a hard disconnect
3610 		 * of this domain then we want to
3611 		 * blow straight through and not
3612 		 * waste time trying to talk to the
3613 		 * remote domain nor to domains we
3614 		 * believe are AWOL.  Although we will
3615 		 * try and do it cleanly with
3616 		 * everybody else.
3617 		 */
3618 		DOMAINSET_ADD(my_ready_set, domid);
3619 		my_ready_set |= idn.domset.ds_awol;
3620 		ready_set = DOMAINSET_ALL;
3621 
3622 	} else if (dp->dfin_sync == IDNFIN_SYNC_NO) {
3623 		/*
3624 		 * If we're not fin'ing this domain
3625 		 * synchronously then the only
3626 		 * expected domain set is himself.
3627 		 */
3628 		ready_set |= ~DOMAINSET(domid);
3629 		my_ready_set |= ~DOMAINSET(domid);
3630 	}
3631 
3632 	if (dp->dsync.s_cmd != IDNSYNC_DISCONNECT) {
3633 		idn_sync_exit(domid, IDNSYNC_CONNECT);
3634 		idn_sync_enter(domid, IDNSYNC_DISCONNECT, DOMAINSET_ALL,
3635 		    my_ready_set, idn_xstate_transfunc,	(void *)IDNP_FIN);
3636 	}
3637 
3638 	query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT, ready_set,
3639 	    IDNSYNC_REG_REG);
3640 
3641 	/*
3642 	 * No need to query this domain as he's already
3643 	 * in the FIN sequence.
3644 	 */
3645 	DOMAINSET_DEL(query_set, domid);
3646 
3647 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
3648 	if (ready) {
3649 		DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
3650 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
3651 	}
3652 
3653 	if (query_set) {
3654 		int	d;
3655 
3656 		my_ready_set = idn.domset.ds_ready_off |
3657 		    ~idn.domset.ds_connected;
3658 
3659 		for (d = 0; d < MAX_DOMAINS; d++) {
3660 			if (!DOMAIN_IN_SET(query_set, d))
3661 				continue;
3662 
3663 			dp = &idn_domain[d];
3664 
3665 			IDN_DLOCK_EXCL(d);
3666 
3667 			if (dp->dsync.s_cmd == IDNSYNC_DISCONNECT) {
3668 				IDN_DUNLOCK(d);
3669 				continue;
3670 			}
3671 
3672 			IDN_SYNC_QUERY_UPDATE(domid, d);
3673 
3674 			(void) idn_send_fin(d, NULL, IDNFIN_QUERY,
3675 			    IDNFIN_ARG_NONE, IDNFIN_OPT_NONE, my_ready_set,
3676 			    NIL_FIN_MASTER);
3677 			IDN_DUNLOCK(d);
3678 		}
3679 	}
3680 
3681 	return (!msg ? 0 : (ready ? 0 : 1));
3682 }
3683 
3684 /*ARGSUSED*/
3685 static void
idn_error_fin_pend(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3686 idn_error_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3687 {
3688 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
3689 	uint_t	token;
3690 
3691 	ASSERT(IDN_SYNC_IS_LOCKED());
3692 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3693 
3694 	/*
3695 	 * Don't communicate with domains that
3696 	 * we're forcing a hard disconnect.
3697 	 */
3698 	if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
3699 	    (msg & IDNP_MSGTYPE_MASK)) {
3700 		idn_msgtype_t	mt;
3701 		idn_xdcargs_t	nargs;
3702 
3703 		mt.mt_mtype = IDNP_NACK;
3704 		mt.mt_atype = msg;
3705 		mt.mt_cookie = mtp->mt_cookie;
3706 		CLR_XARGS(nargs);
3707 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
3708 		idn_send_acknack(domid, &mt, nargs);
3709 	}
3710 
3711 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3712 	idn_retry_submit(idn_retry_fin, NULL, token,
3713 	    idn_msg_retrytime[(int)IDNRETRY_FIN]);
3714 }
3715 
3716 static void
idn_action_fin_pend(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3717 idn_action_fin_pend(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3718 {
3719 	idn_domain_t	*dp = &idn_domain[domid];
3720 	domainset_t	my_ready_set;
3721 	idn_finopt_t	finopt;
3722 	idn_finarg_t	finarg;
3723 	uint_t		finmaster;
3724 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3725 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3726 	idn_msgtype_t	mt;
3727 
3728 	ASSERT(IDN_SYNC_IS_LOCKED());
3729 	ASSERT(IDN_DLOCK_IS_HELD(domid));
3730 
3731 	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_off |
3732 	    ~idn.domset.ds_connected;
3733 
3734 	ASSERT(xargs[0] != (uint_t)IDNFIN_QUERY);
3735 
3736 	finarg = GET_XARGS_FIN_ARG(xargs);
3737 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3738 	    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3739 
3740 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3741 
3742 	IDN_GLOCK_SHARED();
3743 	new_masterid = IDN_GET_NEW_MASTERID();
3744 	IDN_GUNLOCK();
3745 	if (new_masterid != IDN_NIL_DOMID)
3746 		new_cpuid = idn_domain[new_masterid].dcpu;
3747 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3748 
3749 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3750 		ASSERT(IDN_DLOCK_IS_EXCL(domid));
3751 
3752 		if (!msg) {
3753 			mt.mt_mtype = IDNP_FIN | IDNP_ACK;
3754 			mt.mt_atype = 0;
3755 		} else {
3756 			mt.mt_mtype = IDNP_ACK;
3757 			mt.mt_atype = IDNP_FIN | IDNP_ACK;
3758 		}
3759 		(void) idn_xphase_transition(domid, &mt, xargs);
3760 	} else if (!msg) {
3761 		(void) idn_send_fin(domid, NULL, dp->dfin, finarg,
3762 		    finopt, my_ready_set, finmaster);
3763 	} else if ((msg & IDNP_ACKNACK_MASK) == 0) {
3764 		/*
3765 		 * fin
3766 		 */
3767 		mt.mt_mtype = IDNP_FIN | IDNP_ACK;
3768 		mt.mt_atype = 0;
3769 		(void) idn_send_fin(domid, &mt, dp->dfin, finarg,
3770 		    finopt, my_ready_set, finmaster);
3771 	} else {
3772 		uint_t	token;
3773 		/*
3774 		 * nack - retry
3775 		 */
3776 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3777 		idn_retry_submit(idn_retry_fin, NULL, token,
3778 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
3779 	}
3780 }
3781 
3782 static int
idn_check_fin_sent(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3783 idn_check_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3784 {
3785 	int		ready;
3786 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3787 	idn_fin_t	fintype;
3788 	idn_finopt_t	finopt;
3789 	idn_domain_t	*dp = &idn_domain[domid];
3790 	domainset_t	query_set, ready_set;
3791 
3792 	ASSERT(IDN_SYNC_IS_LOCKED());
3793 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3794 
3795 	if (msg & IDNP_NACK)
3796 		return (0);
3797 
3798 	fintype   = GET_XARGS_FIN_TYPE(xargs);
3799 	ready_set = GET_XARGS_FIN_DOMSET(xargs);
3800 	finopt    = GET_XARGS_FIN_OPT(xargs);
3801 
3802 	ASSERT(fintype != IDNFIN_QUERY);
3803 	if (!VALID_FIN(fintype)) {
3804 		/*
3805 		 * If for some reason remote domain
3806 		 * sent us an invalid FIN type,
3807 		 * override it to a  NORMAL fin.
3808 		 */
3809 		fintype = IDNFIN_NORMAL;
3810 	}
3811 
3812 	if (!VALID_FINOPT(finopt)) {
3813 		finopt = IDNFIN_OPT_UNLINK;
3814 	}
3815 	IDN_GLOCK_SHARED();
3816 	if ((finopt == IDNFIN_OPT_RELINK) && (idn.state != IDNGS_DISCONNECT)) {
3817 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
3818 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
3819 		    idn.domset.ds_relink);
3820 	} else {
3821 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
3822 	}
3823 	IDN_GUNLOCK();
3824 
3825 	switch (dp->dfin) {
3826 	case IDNFIN_NORMAL:
3827 	case IDNFIN_FORCE_SOFT:
3828 	case IDNFIN_FORCE_HARD:
3829 		if (fintype < dp->dfin) {
3830 			/*
3831 			 * Remote domain has requested a
3832 			 * FIN of lower priority than what
3833 			 * we're current running.  Just
3834 			 * leave the priority where it is.
3835 			 */
3836 			break;
3837 		}
3838 		/*FALLTHROUGH*/
3839 
3840 	default:
3841 		IDN_FSTATE_TRANSITION(dp, fintype);
3842 		break;
3843 	}
3844 
3845 	if (dp->dfin == IDNFIN_FORCE_HARD) {
3846 		/*
3847 		 * If we're doing a hard disconnect
3848 		 * of this domain then we want to
3849 		 * blow straight through and not
3850 		 * waste time trying to talk to the
3851 		 * remote domain.  By registering him
3852 		 * as ready with respect to all
3853 		 * possible domains he'll transition
3854 		 * immediately.  Note that we'll still
3855 		 * try and do it coherently with
3856 		 * other domains to which we're connected.
3857 		 */
3858 		ready_set = DOMAINSET_ALL;
3859 	} else {
3860 		DOMAINSET_ADD(ready_set, idn.localid);
3861 	}
3862 
3863 	DOMAINSET_ADD(idn.domset.ds_ready_off, domid);
3864 
3865 	query_set = idn_sync_register(domid, IDNSYNC_DISCONNECT,
3866 	    ready_set, IDNSYNC_REG_REG);
3867 	/*
3868 	 * No need to query this domain as he's already
3869 	 * in the FIN sequence.
3870 	 */
3871 	DOMAINSET_DEL(query_set, domid);
3872 
3873 	ready = (dp->dsync.s_set_exp == dp->dsync.s_set_rdy) ? 1 : 0;
3874 	if (ready) {
3875 		DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
3876 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
3877 	}
3878 
3879 	if (query_set) {
3880 		int		d;
3881 		domainset_t	my_ready_set;
3882 
3883 		my_ready_set = idn.domset.ds_ready_off |
3884 		    ~idn.domset.ds_connected;
3885 
3886 		for (d = 0; d < MAX_DOMAINS; d++) {
3887 			if (!DOMAIN_IN_SET(query_set, d))
3888 				continue;
3889 
3890 			dp = &idn_domain[d];
3891 
3892 			IDN_DLOCK_EXCL(d);
3893 
3894 			if (dp->dsync.s_cmd == IDNSYNC_DISCONNECT) {
3895 				IDN_DUNLOCK(d);
3896 				continue;
3897 			}
3898 
3899 			IDN_SYNC_QUERY_UPDATE(domid, d);
3900 
3901 			(void) idn_send_fin(d, NULL, IDNFIN_QUERY,
3902 			    IDNFIN_ARG_NONE, IDNFIN_OPT_NONE, my_ready_set,
3903 			    NIL_FIN_MASTER);
3904 			IDN_DUNLOCK(d);
3905 		}
3906 	}
3907 
3908 	return ((ready > 0) ? 0 : 1);
3909 }
3910 
3911 /*ARGSUSED*/
3912 static void
idn_error_fin_sent(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3913 idn_error_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3914 {
3915 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
3916 	uint_t	token;
3917 
3918 	ASSERT(IDN_SYNC_IS_LOCKED());
3919 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3920 
3921 	/*
3922 	 * Don't communicate with domains that
3923 	 * we're forcing a hard disconnect.
3924 	 */
3925 	if ((idn_domain[domid].dfin != IDNFIN_FORCE_HARD) &&
3926 	    (msg & IDNP_MSGTYPE_MASK)) {
3927 		idn_msgtype_t	mt;
3928 		idn_xdcargs_t	nargs;
3929 
3930 		mt.mt_mtype = IDNP_NACK;
3931 		mt.mt_atype = msg;
3932 		mt.mt_cookie = mtp->mt_cookie;
3933 		CLR_XARGS(nargs);
3934 		SET_XARGS_NACK_TYPE(nargs, IDNNACK_RETRY);
3935 		idn_send_acknack(domid, &mt, nargs);
3936 	}
3937 
3938 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
3939 	idn_retry_submit(idn_retry_fin, NULL, token,
3940 	    idn_msg_retrytime[(int)IDNRETRY_FIN]);
3941 }
3942 
3943 static void
idn_action_fin_sent(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)3944 idn_action_fin_sent(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
3945 {
3946 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
3947 	int		new_masterid, new_cpuid = IDN_NIL_DCPU;
3948 	uint_t		finmaster;
3949 	idn_msgtype_t	mt;
3950 	idn_finopt_t	finopt;
3951 	idn_finarg_t	finarg;
3952 	domainset_t	my_ready_set;
3953 	idn_domain_t	*dp = &idn_domain[domid];
3954 
3955 	ASSERT(IDN_SYNC_IS_LOCKED());
3956 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
3957 
3958 	mt.mt_cookie = mtp ? mtp->mt_cookie : 0;
3959 
3960 	finopt = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ?
3961 	    IDNFIN_OPT_RELINK : IDNFIN_OPT_UNLINK;
3962 
3963 	finarg = GET_XARGS_FIN_ARG(xargs);
3964 
3965 	my_ready_set = dp->dsync.s_set_rdy | idn.domset.ds_ready_off |
3966 	    ~idn.domset.ds_connected;
3967 
3968 	IDN_GLOCK_SHARED();
3969 	new_masterid = IDN_GET_NEW_MASTERID();
3970 	IDN_GUNLOCK();
3971 	if (new_masterid != IDN_NIL_DOMID)
3972 		new_cpuid = idn_domain[new_masterid].dcpu;
3973 	finmaster = MAKE_FIN_MASTER(new_masterid, new_cpuid);
3974 
3975 	if ((msg & IDNP_ACKNACK_MASK) == 0) {
3976 		/*
3977 		 * fin
3978 		 */
3979 		if (dp->dfin == IDNFIN_FORCE_HARD) {
3980 			mt.mt_mtype = IDNP_ACK;
3981 			mt.mt_atype = IDNP_FIN | IDNP_ACK;
3982 			(void) idn_xphase_transition(domid, &mt, xargs);
3983 		} else {
3984 			mt.mt_mtype = IDNP_FIN | IDNP_ACK;
3985 			mt.mt_atype = 0;
3986 			(void) idn_send_fin(domid, &mt, dp->dfin, finarg,
3987 			    finopt, my_ready_set, finmaster);
3988 		}
3989 	} else if (msg & IDNP_MSGTYPE_MASK) {
3990 		/*
3991 		 * fin+ack
3992 		 */
3993 		if (dp->dfin != IDNFIN_FORCE_HARD) {
3994 			idn_xdcargs_t	fargs;
3995 
3996 			mt.mt_mtype = IDNP_ACK;
3997 			mt.mt_atype = msg;
3998 			CLR_XARGS(fargs);
3999 			SET_XARGS_FIN_TYPE(fargs, dp->dfin);
4000 			SET_XARGS_FIN_ARG(fargs, finarg);
4001 			SET_XARGS_FIN_DOMSET(fargs, my_ready_set);
4002 			SET_XARGS_FIN_OPT(fargs, finopt);
4003 			SET_XARGS_FIN_MASTER(fargs, finmaster);
4004 			idn_send_acknack(domid, &mt, fargs);
4005 		}
4006 	} else {
4007 		uint_t	token;
4008 		/*
4009 		 * nack - retry
4010 		 */
4011 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4012 		idn_retry_submit(idn_retry_fin, NULL, token,
4013 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
4014 	}
4015 }
4016 
4017 /*ARGSUSED*/
4018 static void
idn_action_fin_rcvd(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)4019 idn_action_fin_rcvd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
4020 {
4021 	uint_t	msg = mtp ? mtp->mt_mtype : 0;
4022 
4023 	ASSERT(IDN_SYNC_IS_LOCKED());
4024 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4025 
4026 	if (msg & IDNP_NACK) {
4027 		uint_t	token;
4028 		/*
4029 		 * nack - retry.
4030 		 */
4031 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4032 		idn_retry_submit(idn_retry_fin, NULL, token,
4033 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
4034 	}
4035 }
4036 
4037 static void
idn_final_fin(int domid)4038 idn_final_fin(int domid)
4039 {
4040 	int		do_relink;
4041 	int		rv, d, new_masterid = IDN_NIL_DOMID;
4042 	idn_gstate_t	next_gstate;
4043 	domainset_t	relinkset;
4044 	uint_t		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4045 	idn_domain_t	*ldp, *dp = &idn_domain[domid];
4046 	procname_t	proc = "idn_final_fin";
4047 
4048 	ASSERT(IDN_SYNC_IS_LOCKED());
4049 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4050 	ASSERT(dp->dstate == IDNDS_DMAP);
4051 
4052 	(void) idn_retry_terminate(token);
4053 
4054 	dp->dxp = NULL;
4055 	IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
4056 
4057 	idn_sync_exit(domid, IDNSYNC_DISCONNECT);
4058 
4059 	DOMAINSET_DEL(idn.domset.ds_trans_off, domid);
4060 
4061 	do_relink = DOMAIN_IN_SET(idn.domset.ds_relink, domid) ? 1 : 0;
4062 
4063 	/*
4064 	 * idn_deconfig will idn_close_domain.
4065 	 */
4066 	idn_deconfig(domid);
4067 
4068 	PR_PROTO("%s:%d: DISCONNECTED\n", proc, domid);
4069 
4070 	IDN_GLOCK_EXCL();
4071 	/*
4072 	 * It's important that this update-op occur within
4073 	 * the context of holding the glock(EXCL).  There is
4074 	 * still some additional state stuff to cleanup which
4075 	 * will be completed once the glock is dropped in
4076 	 * this flow.  Which means anybody that's doing a
4077 	 * SSI_INFO and waiting on glock will not actually
4078 	 * run until the clean-up is completed, which is what
4079 	 * we want.  Recall that a separate thread processes
4080 	 * the SSI_LINK/UNLINK calls and when they complete
4081 	 * (i.e. are awakened) they will immediately SSI_INFO
4082 	 * and we don't want them to prematurely pick up stale
4083 	 * information.
4084 	 */
4085 	idn_update_op(IDNOP_DISCONNECTED, DOMAINSET(domid), NULL);
4086 
4087 	ASSERT(idn.state != IDNGS_OFFLINE);
4088 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_trans_on, domid));
4089 
4090 	if (domid == IDN_GET_MASTERID()) {
4091 		IDN_SET_MASTERID(IDN_NIL_DOMID);
4092 		dp->dvote.v.master = 0;
4093 	}
4094 
4095 	if ((domid == IDN_GET_NEW_MASTERID()) && !do_relink) {
4096 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
4097 	}
4098 
4099 	if (idn.state == IDNGS_RECONFIG)
4100 		new_masterid = IDN_GET_NEW_MASTERID();
4101 
4102 	if ((idn.domset.ds_trans_on | idn.domset.ds_trans_off |
4103 	    idn.domset.ds_relink) == 0) {
4104 		PR_HITLIST("%s:%d: HITLIST %x -> 0\n",
4105 		    proc, domid, idn.domset.ds_hitlist);
4106 		idn.domset.ds_hitlist = 0;
4107 	}
4108 
4109 	if (idn.domset.ds_connected || idn.domset.ds_trans_off) {
4110 		PR_PROTO("%s:%d: ds_connected = 0x%x, ds_trans_off = 0x%x\n",
4111 		    proc, domid, idn.domset.ds_connected,
4112 		    idn.domset.ds_trans_off);
4113 		IDN_GUNLOCK();
4114 		goto fin_done;
4115 	}
4116 
4117 	IDN_DLOCK_EXCL(idn.localid);
4118 	ldp = &idn_domain[idn.localid];
4119 
4120 	if (idn.domset.ds_trans_on != 0) {
4121 		ASSERT((idn.state != IDNGS_DISCONNECT) &&
4122 		    (idn.state != IDNGS_OFFLINE));
4123 
4124 		switch (idn.state) {
4125 		case IDNGS_CONNECT:
4126 			if (idn.localid == IDN_GET_MASTERID()) {
4127 				idn_master_deinit();
4128 				IDN_SET_MASTERID(IDN_NIL_DOMID);
4129 				ldp->dvote.v.master = 0;
4130 			}
4131 			/*FALLTHROUGH*/
4132 		case IDNGS_ONLINE:
4133 			next_gstate = idn.state;
4134 			break;
4135 
4136 		case IDNGS_RECONFIG:
4137 			if (idn.localid == IDN_GET_MASTERID()) {
4138 				idn_master_deinit();
4139 				IDN_SET_MASTERID(IDN_NIL_DOMID);
4140 				ldp->dvote.v.master = 0;
4141 			}
4142 			ASSERT(IDN_GET_MASTERID() == IDN_NIL_DOMID);
4143 			next_gstate = IDNGS_CONNECT;
4144 			ldp->dvote.v.connected = 0;
4145 			/*
4146 			 * Need to do HWINIT since we won't
4147 			 * be transitioning through OFFLINE
4148 			 * which would normally be caught in
4149 			 * idn_check_nego() when we
4150 			 * initially go to CONNECT.
4151 			 */
4152 			IDN_PREP_HWINIT();
4153 			break;
4154 
4155 		case IDNGS_DISCONNECT:
4156 		case IDNGS_OFFLINE:
4157 			cmn_err(CE_WARN,
4158 			    "IDN: 211: disconnect domain %d, "
4159 			    "unexpected Gstate (%s)",
4160 			    domid, idngs_str[idn.state]);
4161 			IDN_DUNLOCK(idn.localid);
4162 			IDN_GUNLOCK();
4163 			goto fin_done;
4164 
4165 		default:
4166 			/*
4167 			 * XXX
4168 			 * Go into FATAL state?
4169 			 */
4170 			cmn_err(CE_PANIC,
4171 			    "IDN: 212: disconnect domain %d, "
4172 			    "bad Gstate (%d)",
4173 			    domid, idn.state);
4174 			/* not reached */
4175 			break;
4176 		}
4177 	} else {
4178 		if (idn.localid == IDN_GET_MASTERID()) {
4179 			idn_master_deinit();
4180 			IDN_SET_MASTERID(IDN_NIL_DOMID);
4181 			ldp->dvote.v.master = 0;
4182 		}
4183 		next_gstate = IDNGS_OFFLINE;
4184 		if (idn.domset.ds_relink == 0) {
4185 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
4186 		}
4187 	}
4188 	IDN_DUNLOCK(idn.localid);
4189 
4190 	/*
4191 	 * If we reach here we've effectively disconnected all
4192 	 * existing links, however new ones may be pending.
4193 	 */
4194 	PR_PROTO("%s:%d: ALL DISCONNECTED *****************\n", proc, domid);
4195 
4196 	IDN_GSTATE_TRANSITION(next_gstate);
4197 
4198 	ASSERT((idn.state == IDNGS_OFFLINE) ?
4199 	    (IDN_GET_MASTERID() == IDN_NIL_DOMID) : 1);
4200 
4201 	IDN_GUNLOCK();
4202 
4203 	/*
4204 	 * If we have no new masterid and yet there are relinkers
4205 	 * out there, then force us to attempt to link with one
4206 	 * of them.
4207 	 */
4208 	if ((new_masterid == IDN_NIL_DOMID) && idn.domset.ds_relink)
4209 		new_masterid = idn.localid;
4210 
4211 	if (new_masterid != IDN_NIL_DOMID) {
4212 		/*
4213 		 * If the local domain is the selected
4214 		 * master then we'll want to initiate
4215 		 * a link with one of the other candidates.
4216 		 * If not, then we want to initiate a link
4217 		 * with the master only.
4218 		 */
4219 		relinkset = (new_masterid == idn.localid) ?
4220 		    idn.domset.ds_relink : DOMAINSET(new_masterid);
4221 
4222 		DOMAINSET_DEL(relinkset, idn.localid);
4223 
4224 		for (d = 0; d < MAX_DOMAINS; d++) {
4225 			int	lock_held;
4226 
4227 			if (!DOMAIN_IN_SET(relinkset, d))
4228 				continue;
4229 
4230 			if (d == domid) {
4231 				do_relink = 0;
4232 				lock_held = 0;
4233 			} else {
4234 				IDN_DLOCK_EXCL(d);
4235 				lock_held = 1;
4236 			}
4237 
4238 			rv = idn_open_domain(d, -1, 0);
4239 			if (rv == 0) {
4240 				rv = idn_connect(d);
4241 				if (lock_held)
4242 					IDN_DUNLOCK(d);
4243 				/*
4244 				 * If we're able to kick off at
4245 				 * least one connect then that's
4246 				 * good enough for now.  The others
4247 				 * will fall into place normally.
4248 				 */
4249 				if (rv == 0)
4250 					break;
4251 			} else if (rv < 0) {
4252 				if (lock_held)
4253 					IDN_DUNLOCK(d);
4254 				cmn_err(CE_WARN,
4255 				    "IDN: 205: (%s.1) failed to "
4256 				    "open-domain(%d,%d)",
4257 				    proc, domid, -1);
4258 				DOMAINSET_DEL(idn.domset.ds_relink, d);
4259 			} else {
4260 				if (lock_held)
4261 					IDN_DUNLOCK(d);
4262 				PR_PROTO("%s:%d: failed to "
4263 				    "re-open domain %d "
4264 				    "(cpu %d) [rv = %d]\n",
4265 				    proc, domid, d, idn_domain[d].dcpu,
4266 				    rv);
4267 			}
4268 		}
4269 	}
4270 
4271 fin_done:
4272 	if (do_relink) {
4273 		ASSERT(IDN_DLOCK_IS_EXCL(domid));
4274 
4275 		rv = idn_open_domain(domid, -1, 0);
4276 		if (rv == 0) {
4277 			(void) idn_connect(domid);
4278 		} else if (rv < 0) {
4279 			cmn_err(CE_WARN,
4280 			    "IDN: 205: (%s.2) failed to "
4281 			    "open-domain(%d,%d)",
4282 			    proc, domid, -1);
4283 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
4284 		}
4285 	}
4286 }
4287 
4288 static void
idn_exit_fin(int domid,uint_t msgtype)4289 idn_exit_fin(int domid, uint_t msgtype)
4290 {
4291 	idn_domain_t	*dp = &idn_domain[domid];
4292 	uint_t		token;
4293 	procname_t	proc = "idn_exit_fin";
4294 	STRING(str);
4295 
4296 	ASSERT(IDN_SYNC_IS_LOCKED());
4297 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4298 
4299 	INUM2STR(msgtype, str);
4300 	PR_PROTO("%s:%d: msgtype = 0x%x(%s)\n", proc, domid, msgtype, str);
4301 
4302 	token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
4303 	(void) idn_retry_terminate(token);
4304 
4305 	DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
4306 
4307 	dp->dxp = &xphase_fin;
4308 	IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
4309 
4310 	idn_retry_submit(idn_retry_fin, NULL, token,
4311 	    idn_msg_retrytime[(int)IDNRETRY_FIN]);
4312 }
4313 
4314 /*
4315  * Must return w/locks held.
4316  */
4317 static int
idn_xphase_transition(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)4318 idn_xphase_transition(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
4319 {
4320 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
4321 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
4322 	idn_xphase_t	*xp;
4323 	idn_domain_t	*dp;
4324 	int		(*cfunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4325 	void		(*ffunc)(int);
4326 	void		(*afunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4327 	void		(*efunc)(int, idn_msgtype_t *, idn_xdcargs_t);
4328 	void		(*xfunc)(int, uint_t);
4329 	int		err = 0;
4330 	uint_t		msgtype;
4331 	idn_xstate_t	o_xstate, n_xstate;
4332 	procname_t	proc = "idn_xphase_transition";
4333 	STRING(mstr);
4334 	STRING(astr);
4335 
4336 	ASSERT(IDN_SYNC_IS_LOCKED());
4337 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4338 
4339 	INUM2STR(msg, mstr);
4340 	INUM2STR(msgarg, astr);
4341 
4342 	dp = &idn_domain[domid];
4343 	if ((xp = dp->dxp) == NULL) {
4344 		PR_PROTO("%s:%d: WARNING: domain xsp is NULL (msg = %s, "
4345 		    "msgarg = %s) <<<<<<<<<<<<\n",
4346 		    proc, domid, mstr, astr);
4347 		return (-1);
4348 	}
4349 	o_xstate = dp->dxstate;
4350 
4351 	xfunc = xp->xt_exit;
4352 
4353 	if ((msgtype = (msg & IDNP_MSGTYPE_MASK)) == 0)
4354 		msgtype = msgarg & IDNP_MSGTYPE_MASK;
4355 
4356 	if ((o_xstate == IDNXS_PEND) && msg &&
4357 	    ((msg & IDNP_ACKNACK_MASK) == msg)) {
4358 		PR_PROTO("%s:%d: unwanted acknack received (o_xstate = %s, "
4359 		    "msg = %s/%s - dropping message\n",
4360 		    proc, domid, idnxs_str[(int)o_xstate], mstr, astr);
4361 		return (0);
4362 	}
4363 
4364 	/*
4365 	 * Validate that message received is following
4366 	 * the expected protocol for the current state.
4367 	 */
4368 	if (idn_next_xstate(o_xstate, -1, msg) == IDNXS_NIL) {
4369 		PR_PROTO("%s:%d: WARNING: o_xstate = %s, msg = %s -> NIL "
4370 		    "<<<<<<<<<\n",
4371 		    proc, domid, idnxs_str[(int)o_xstate], mstr);
4372 		if (xfunc)
4373 			(*xfunc)(domid, msgtype);
4374 		return (-1);
4375 	}
4376 
4377 	if (msg || msgarg) {
4378 		/*
4379 		 * Verify that message type is correct for
4380 		 * the given xstate.
4381 		 */
4382 		if (msgtype != xp->xt_msgtype) {
4383 			STRING(xstr);
4384 			STRING(tstr);
4385 
4386 			INUM2STR(xp->xt_msgtype, xstr);
4387 			INUM2STR(msgtype, tstr);
4388 			PR_PROTO("%s:%d: WARNING: msg expected %s(0x%x), "
4389 			    "actual %s(0x%x) [msg=%s(0x%x), "
4390 			    "msgarg=%s(0x%x)]\n",
4391 			    proc, domid, xstr, xp->xt_msgtype,
4392 			    tstr, msgtype, mstr, msg, astr, msgarg);
4393 			if (xfunc)
4394 				(*xfunc)(domid, msgtype);
4395 			return (-1);
4396 		}
4397 	}
4398 
4399 	cfunc = xp->xt_trans[(int)o_xstate].t_check;
4400 
4401 	if (cfunc && ((err = (*cfunc)(domid, mtp, xargs)) < 0)) {
4402 		if (o_xstate != IDNXS_PEND) {
4403 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
4404 		}
4405 		if (xfunc)
4406 			(*xfunc)(domid, msgtype);
4407 		return (-1);
4408 	}
4409 
4410 	n_xstate = idn_next_xstate(o_xstate, err, msg);
4411 
4412 	if (n_xstate == IDNXS_NIL) {
4413 		PR_PROTO("%s:%d: WARNING: n_xstate = %s, msg = %s -> NIL "
4414 		    "<<<<<<<<<\n",
4415 		    proc, domid, idnxs_str[(int)n_xstate], mstr);
4416 		if (xfunc)
4417 			(*xfunc)(domid, msgtype);
4418 		return (-1);
4419 	}
4420 
4421 	if (n_xstate != o_xstate) {
4422 		IDN_XSTATE_TRANSITION(dp, n_xstate);
4423 	}
4424 
4425 	if (err) {
4426 		if ((efunc = xp->xt_trans[(int)o_xstate].t_error) != NULL)
4427 			(*efunc)(domid, mtp, xargs);
4428 	} else if ((afunc = xp->xt_trans[(int)o_xstate].t_action) != NULL) {
4429 		(*afunc)(domid, mtp, xargs);
4430 	}
4431 
4432 	if ((n_xstate == IDNXS_FINAL) && ((ffunc = xp->xt_final) != NULL))
4433 		(*ffunc)(domid);
4434 
4435 	return (0);
4436 }
4437 
4438 /*
4439  * Entered and returns w/DLOCK & SYNC_LOCK held.
4440  */
4441 static int
idn_xstate_transfunc(int domid,void * transarg)4442 idn_xstate_transfunc(int domid, void *transarg)
4443 {
4444 	uint_t		msg = (uint_t)(uintptr_t)transarg;
4445 	uint_t		token;
4446 	procname_t	proc = "idn_xstate_transfunc";
4447 
4448 	ASSERT(IDN_SYNC_IS_LOCKED());
4449 
4450 	switch (msg) {
4451 	case IDNP_CON:
4452 		DOMAINSET_ADD(idn.domset.ds_connected, domid);
4453 		break;
4454 
4455 	case IDNP_FIN:
4456 		DOMAINSET_DEL(idn.domset.ds_connected, domid);
4457 		break;
4458 
4459 	default:
4460 		PR_PROTO("%s:%d: ERROR: unknown msg (0x%x) <<<<<<<<\n",
4461 		    proc, domid, msg);
4462 		return (0);
4463 	}
4464 
4465 	token = IDN_RETRY_TOKEN(domid, (msg == IDNP_CON) ?
4466 	    IDNRETRY_CON : IDNRETRY_FIN);
4467 	if (msg == IDNP_CON)
4468 		idn_retry_submit(idn_retry_con, NULL, token,
4469 		    idn_msg_retrytime[(int)IDNRETRY_CON]);
4470 	else
4471 		idn_retry_submit(idn_retry_fin, NULL, token,
4472 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
4473 
4474 	return (1);
4475 }
4476 
4477 /*
4478  * Entered and returns w/DLOCK & SYNC_LOCK held.
4479  */
4480 static void
idn_sync_enter(int domid,idn_synccmd_t cmd,domainset_t xset,domainset_t rset,int (* transfunc)(),void * transarg)4481 idn_sync_enter(int domid, idn_synccmd_t cmd, domainset_t xset,
4482     domainset_t rset, int (*transfunc)(), void *transarg)
4483 {
4484 	int		z;
4485 	idn_syncop_t	*sp;
4486 	idn_synczone_t	*zp;
4487 	procname_t	proc = "idn_sync_enter";
4488 
4489 	ASSERT(IDN_SYNC_IS_LOCKED());
4490 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4491 
4492 	z = IDN_SYNC_GETZONE(cmd);
4493 	ASSERT(z >= 0);
4494 	zp = &idn.sync.sz_zone[z];
4495 
4496 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d, xs=0x%x, rx=0x%x, cnt=%d\n",
4497 	    proc, domid, idnsync_str[cmd], cmd, z, xset, rset, zp->sc_cnt);
4498 
4499 	sp = &idn_domain[domid].dsync;
4500 
4501 	sp->s_domid = domid;
4502 	sp->s_cmd = cmd;
4503 	sp->s_msg = 0;
4504 	sp->s_set_exp = xset;
4505 	sp->s_set_rdy = rset;
4506 	sp->s_transfunc = transfunc;
4507 	sp->s_transarg = transarg;
4508 	IDN_SYNC_QUERY_INIT(domid);
4509 
4510 	sp->s_next = zp->sc_op;
4511 	zp->sc_op = sp;
4512 	zp->sc_cnt++;
4513 }
4514 
4515 /*
4516  * Entered and returns w/DLOCK & SYNC_LOCK held.
4517  */
4518 void
idn_sync_exit(int domid,idn_synccmd_t cmd)4519 idn_sync_exit(int domid, idn_synccmd_t cmd)
4520 {
4521 	int		d, z, zone, tot_queries, tot_domains;
4522 	idn_syncop_t	*sp;
4523 	idn_synczone_t	*zp = NULL;
4524 	procname_t	proc = "idn_sync_exit";
4525 
4526 	ASSERT(IDN_SYNC_IS_LOCKED());
4527 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4528 
4529 	sp = &idn_domain[domid].dsync;
4530 
4531 	z = IDN_SYNC_GETZONE(sp->s_cmd);
4532 
4533 	zone = IDN_SYNC_GETZONE(cmd);
4534 
4535 	PR_SYNC("%s:%d: cmd=%s(%d) (z=%d, zone=%d)\n",
4536 	    proc, domid, idnsync_str[cmd], cmd, z, zone);
4537 
4538 #ifdef DEBUG
4539 	if (z != -1) {
4540 		tot_queries = tot_domains = 0;
4541 
4542 		for (d = 0; d < MAX_DOMAINS; d++) {
4543 			int	qv;
4544 
4545 			if ((qv = sp->s_query[d]) > 0) {
4546 				tot_queries += qv;
4547 				tot_domains++;
4548 				PR_SYNC("%s:%d: query_count = %d\n",
4549 				    proc, domid, qv);
4550 			}
4551 		}
4552 		PR_SYNC("%s:%d: tot_queries = %d, tot_domaines = %d\n",
4553 		    proc, domid, tot_queries, tot_domains);
4554 	}
4555 #endif /* DEBUG */
4556 
4557 	zp = (z != -1) ? &idn.sync.sz_zone[z] : NULL;
4558 
4559 	if (zp) {
4560 		idn_syncop_t	**spp;
4561 
4562 		for (spp = &zp->sc_op; *spp; spp = &((*spp)->s_next)) {
4563 			if (*spp == sp) {
4564 				*spp = sp->s_next;
4565 				sp->s_next = NULL;
4566 				zp->sc_cnt--;
4567 				break;
4568 			}
4569 		}
4570 	}
4571 
4572 	sp->s_cmd = IDNSYNC_NIL;
4573 
4574 	for (z = 0; z < IDN_SYNC_NUMZONE; z++) {
4575 		idn_syncop_t	**spp, **nspp;
4576 
4577 		if ((zone != -1) && (z != zone))
4578 			continue;
4579 
4580 		zp = &idn.sync.sz_zone[z];
4581 
4582 		for (spp = &zp->sc_op; *spp; spp = nspp) {
4583 			sp = *spp;
4584 			nspp = &sp->s_next;
4585 
4586 			if (!DOMAIN_IN_SET(sp->s_set_exp, domid))
4587 				continue;
4588 
4589 			DOMAINSET_DEL(sp->s_set_exp, domid);
4590 			DOMAINSET_DEL(sp->s_set_rdy, domid);
4591 
4592 			if ((sp->s_set_exp == sp->s_set_rdy) &&
4593 			    sp->s_transfunc) {
4594 				int	delok;
4595 
4596 				ASSERT(sp->s_domid != domid);
4597 
4598 				PR_SYNC("%s:%d invoking transfunc "
4599 				    "for domain %d\n",
4600 				    proc, domid, sp->s_domid);
4601 				delok = (*sp->s_transfunc)(sp->s_domid,
4602 				    sp->s_transarg);
4603 				if (delok) {
4604 					*spp = sp->s_next;
4605 					sp->s_next = NULL;
4606 					zp->sc_cnt--;
4607 					nspp = spp;
4608 				}
4609 			}
4610 		}
4611 	}
4612 }
4613 
4614 /*
4615  * Entered and returns w/DLOCK & SYNC_LOCK held.
4616  */
4617 static domainset_t
idn_sync_register(int domid,idn_synccmd_t cmd,domainset_t ready_set,idn_syncreg_t regtype)4618 idn_sync_register(int domid, idn_synccmd_t cmd, domainset_t ready_set,
4619     idn_syncreg_t regtype)
4620 {
4621 	int		z;
4622 	idn_synczone_t	*zp;
4623 	idn_syncop_t	*sp, **spp, **nspp;
4624 	domainset_t	query_set = 0, trans_set;
4625 	procname_t	proc = "idn_sync_register";
4626 
4627 	ASSERT(IDN_SYNC_IS_LOCKED());
4628 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
4629 
4630 	if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
4631 		PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
4632 		    proc, domid, cmd);
4633 		return (0);
4634 	}
4635 
4636 	/*
4637 	 * Find out what domains are in transition with respect
4638 	 * to given command.  There will be no need to query
4639 	 * these folks.
4640 	 */
4641 	trans_set = IDN_SYNC_GETTRANS(cmd);
4642 
4643 	zp = &idn.sync.sz_zone[z];
4644 
4645 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d, rset=0x%x, "
4646 	    "regtype=%s(%d), sc_op=%s\n",
4647 	    proc, domid, idnsync_str[cmd], cmd, z, ready_set,
4648 	    idnreg_str[regtype], regtype,
4649 	    zp->sc_op ? idnsync_str[zp->sc_op->s_cmd] : "NULL");
4650 
4651 	for (spp = &zp->sc_op; *spp; spp = nspp) {
4652 		sp = *spp;
4653 		nspp = &sp->s_next;
4654 
4655 		if (regtype == IDNSYNC_REG_NEW) {
4656 			DOMAINSET_ADD(sp->s_set_exp, domid);
4657 			PR_SYNC("%s:%d: adding new to %d (exp=0x%x)\n",
4658 			    proc, domid, sp->s_domid, sp->s_set_exp);
4659 		} else if (regtype == IDNSYNC_REG_QUERY) {
4660 			query_set |= ~sp->s_set_rdy & sp->s_set_exp;
4661 			continue;
4662 		}
4663 
4664 		if (!DOMAIN_IN_SET(sp->s_set_exp, domid))
4665 			continue;
4666 
4667 		if (!DOMAIN_IN_SET(ready_set, sp->s_domid)) {
4668 			/*
4669 			 * Given domid doesn't have a desired
4670 			 * domain in his ready-set.  We'll need
4671 			 * to query him again.
4672 			 */
4673 			DOMAINSET_ADD(query_set, domid);
4674 			continue;
4675 		}
4676 
4677 		/*
4678 		 * If we reach here, then an expected domain
4679 		 * has marked its respective datapath to
4680 		 * sp->s_domid as down (i.e. in his ready_set).
4681 		 */
4682 		DOMAINSET_ADD(sp->s_set_rdy, domid);
4683 
4684 		PR_SYNC("%s:%d: mark READY for domain %d "
4685 		    "(r=0x%x, x=0x%x)\n",
4686 		    proc, domid, sp->s_domid,
4687 		    sp->s_set_rdy, sp->s_set_exp);
4688 
4689 		query_set |= ~sp->s_set_rdy & sp->s_set_exp;
4690 
4691 		if (sp->s_set_exp == sp->s_set_rdy) {
4692 #ifdef DEBUG
4693 			if (sp->s_msg == 0) {
4694 				sp->s_msg = 1;
4695 				PR_SYNC("%s:%d: >>>>>>>>>>> DOMAIN %d "
4696 				    "ALL CHECKED IN (0x%x)\n",
4697 				    proc, domid, sp->s_domid,
4698 				    sp->s_set_exp);
4699 			}
4700 #endif /* DEBUG */
4701 
4702 			if ((sp->s_domid != domid) && sp->s_transfunc) {
4703 				int	delok;
4704 
4705 				PR_SYNC("%s:%d invoking transfunc "
4706 				    "for domain %d\n",
4707 				    proc, domid, sp->s_domid);
4708 				delok = (*sp->s_transfunc)(sp->s_domid,
4709 				    sp->s_transarg);
4710 				if (delok) {
4711 					*spp = sp->s_next;
4712 					sp->s_next = NULL;
4713 					zp->sc_cnt--;
4714 					nspp = spp;
4715 				}
4716 			}
4717 		}
4718 	}
4719 
4720 	PR_SYNC("%s:%d: trans_set = 0x%x, query_set = 0x%x -> 0x%x\n",
4721 	    proc, domid, trans_set, query_set, query_set & ~trans_set);
4722 
4723 	query_set &= ~trans_set;
4724 
4725 	return (query_set);
4726 }
4727 
4728 static void
idn_sync_register_awol(int domid)4729 idn_sync_register_awol(int domid)
4730 {
4731 	int		z;
4732 	idn_synccmd_t	cmd = IDNSYNC_DISCONNECT;
4733 	idn_synczone_t	*zp;
4734 	idn_syncop_t	*sp;
4735 	procname_t	proc = "idn_sync_register_awol";
4736 
4737 	ASSERT(IDN_SYNC_IS_LOCKED());
4738 
4739 	if ((z = IDN_SYNC_GETZONE(cmd)) == -1) {
4740 		PR_SYNC("%s:%d: ERROR: unexpected sync cmd(%d)\n",
4741 		    proc, domid, cmd);
4742 		return;
4743 	}
4744 
4745 	zp = &idn.sync.sz_zone[z];
4746 
4747 	PR_SYNC("%s:%d: cmd=%s(%d), z=%d (domain %d = AWOL)\n",
4748 	    proc, domid, idnsync_str[cmd], cmd, z, domid);
4749 
4750 	for (sp = zp->sc_op; sp; sp = sp->s_next) {
4751 		idn_domain_t	*dp;
4752 
4753 		dp = &idn_domain[sp->s_domid];
4754 		if (dp->dfin == IDNFIN_FORCE_HARD) {
4755 			DOMAINSET_ADD(sp->s_set_rdy, domid);
4756 			PR_SYNC("%s:%d: adding new to %d (rdy=0x%x)\n",
4757 			    proc, domid, sp->s_domid, sp->s_set_rdy);
4758 		}
4759 	}
4760 }
4761 
4762 static void
idn_link_established(void * arg)4763 idn_link_established(void *arg)
4764 {
4765 	int	first_link;
4766 	int	domid, masterid;
4767 	uint_t	info = (uint_t)(uintptr_t)arg;
4768 
4769 	first_link = (int)(info & 0xf0);
4770 	domid = (int)(info & 0x0f);
4771 
4772 	IDN_GLOCK_SHARED();
4773 	masterid = IDN_GET_MASTERID();
4774 	if ((masterid == IDN_NIL_DOMID) ||
4775 	    (idn_domain[masterid].dstate != IDNDS_CONNECTED)) {
4776 		/*
4777 		 * No point in doing this unless we're connected
4778 		 * to the master.
4779 		 */
4780 		if ((masterid != IDN_NIL_DOMID) &&
4781 		    (idn.state == IDNGS_ONLINE)) {
4782 			/*
4783 			 * As long as we're still online keep
4784 			 * trying.
4785 			 */
4786 			(void) timeout(idn_link_established, arg, 50);
4787 		}
4788 		IDN_GUNLOCK();
4789 		return;
4790 	}
4791 	IDN_GUNLOCK();
4792 
4793 	if (first_link && IDN_SLAB_PREALLOC)
4794 		idn_prealloc_slab(IDN_SLAB_PREALLOC);
4795 
4796 	/*
4797 	 * No guarantee, but it might save a little
4798 	 * time.
4799 	 */
4800 	if (idn_domain[domid].dstate == IDNDS_CONNECTED) {
4801 		/*
4802 		 * Get the remote domain's dname.
4803 		 */
4804 		idn_send_nodename_req(domid);
4805 	}
4806 
4807 	/*
4808 	 * May have had some streams backed up waiting for
4809 	 * this connection.  Prod them.
4810 	 */
4811 	rw_enter(&idn.struprwlock, RW_READER);
4812 	mutex_enter(&idn.sipwenlock);
4813 	idndl_wenable(NULL);
4814 	mutex_exit(&idn.sipwenlock);
4815 	rw_exit(&idn.struprwlock);
4816 }
4817 
4818 /*
4819  * Send the following chunk of data received from above onto
4820  * the IDN wire.  This is raw data as far as the IDN driver
4821  * is concerned.
4822  * Returns:
4823  *	IDNXMIT_LOOP	- Msg handled in loopback and thus
4824  *			  still active (i.e. don't free).
4825  *	IDNXMIT_OKAY	- Data handled (freemsg).
4826  *	IDNXMIT_DROP	- Packet should be dropped.
4827  *	IDNXMIT_RETRY	- Packet should be requeued and retried.
4828  *	IDNXMIT_REQUEUE	- Packet should be requeued, but not
4829  *			  immediatetly retried.
4830  */
4831 int
idn_send_data(int dst_domid,idn_netaddr_t dst_netaddr,queue_t * wq,mblk_t * mp)4832 idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
4833 {
4834 	int		pktcnt = 0;
4835 	int		msglen;
4836 	int		rv = IDNXMIT_OKAY;
4837 	int		xfersize = 0;
4838 	caddr_t		iobufp, iodatap;
4839 	uchar_t		*data_rptr;
4840 	int		cpuindex;
4841 	int		serrno;
4842 	int		channel;
4843 	int		retry_reclaim;
4844 	idn_chansvr_t	*csp = NULL;
4845 	uint_t		netports = 0;
4846 	struct idnstr	*stp;
4847 	struct idn	*sip;
4848 	idn_domain_t	*dp;
4849 	struct ether_header	*ehp;
4850 	smr_pkthdr_t	*hdrp;
4851 	idn_msgtype_t	mt;
4852 	procname_t	proc = "idn_send_data";
4853 #ifdef DEBUG
4854 	size_t		orig_msglen = msgsize(mp);
4855 #endif /* DEBUG */
4856 
4857 	ASSERT(DB_TYPE(mp) == M_DATA);
4858 
4859 	mt.mt_mtype = IDNP_DATA;
4860 	mt.mt_atype = 0;
4861 	mt.mt_cookie = 0;
4862 
4863 	channel = (int)dst_netaddr.net.chan;
4864 
4865 	msglen = msgdsize(mp);
4866 	PR_DATA("%s:%d: (netaddr 0x%x) msgsize=%ld, msgdsize=%d\n",
4867 	    proc, dst_domid, dst_netaddr.netaddr, msgsize(mp), msglen);
4868 
4869 	ASSERT(wq->q_ptr);
4870 
4871 	stp = (struct idnstr *)wq->q_ptr;
4872 	sip = stp->ss_sip;
4873 	ASSERT(sip);
4874 
4875 	if (msglen < 0) {
4876 		/*
4877 		 * No data to send.  That was easy!
4878 		 */
4879 		PR_DATA("%s:%d: BAD msg length (%d) (netaddr 0x%x)\n",
4880 		    proc, dst_domid, msglen, dst_netaddr.netaddr);
4881 		return (IDNXMIT_DROP);
4882 	}
4883 
4884 	ASSERT(RW_READ_HELD(&stp->ss_rwlock));
4885 
4886 	if (dst_domid == IDN_NIL_DOMID) {
4887 		cmn_err(CE_WARN,
4888 		    "IDN: 213: no destination specified "
4889 		    "(d=%d, c=%d, n=0x%x)",
4890 		    dst_domid, dst_netaddr.net.chan,
4891 		    dst_netaddr.net.netid);
4892 		IDN_KSTAT_INC(sip, si_nolink);
4893 		IDN_KSTAT_INC(sip, si_macxmt_errors);
4894 		rv = IDNXMIT_DROP;
4895 		goto nocando;
4896 	}
4897 
4898 	ehp = (struct ether_header *)mp->b_rptr;
4899 	PR_DATA("%s:%d: destination channel = %d\n", proc, dst_domid, channel);
4900 
4901 #ifdef DEBUG
4902 	{
4903 		uchar_t	echn;
4904 
4905 		echn = (uchar_t)
4906 		    ehp->ether_shost.ether_addr_octet[IDNETHER_CHANNEL];
4907 		ASSERT((uchar_t)channel == echn);
4908 	}
4909 #endif /* DEBUG */
4910 	ASSERT(msglen <= IDN_DATA_SIZE);
4911 
4912 	dp = &idn_domain[dst_domid];
4913 	/*
4914 	 * Get reader lock.  We hold for the duration
4915 	 * of the transfer so that our state doesn't
4916 	 * change during this activity.  Note that since
4917 	 * we grab the reader lock, we can still permit
4918 	 * simultaneous tranfers from different threads
4919 	 * to the same domain.
4920 	 * Before we waste a bunch of time gathering locks, etc.
4921 	 * do a an unprotected check to make sure things are
4922 	 * semi-copesetic.  If these values are in flux,
4923 	 * that's okay.
4924 	 */
4925 	if ((dp->dstate != IDNDS_CONNECTED) || (idn.state != IDNGS_ONLINE)) {
4926 		IDN_KSTAT_INC(sip, si_linkdown);
4927 		if (idn.state != IDNGS_ONLINE) {
4928 			rv = IDNXMIT_REQUEUE;
4929 		} else {
4930 			IDN_KSTAT_INC(sip, si_macxmt_errors);
4931 			rv = IDNXMIT_DROP;
4932 		}
4933 		goto nocando;
4934 	}
4935 
4936 	if (idn.chan_servers[channel].ch_send.c_checkin) {
4937 		/*
4938 		 * Gotta bail, somethin' s'up.
4939 		 */
4940 		rv = IDNXMIT_REQUEUE;
4941 		goto nocando;
4942 	}
4943 
4944 	csp = &idn.chan_servers[channel];
4945 	IDN_CHAN_LOCK_SEND(csp);
4946 
4947 	if (dst_netaddr.net.netid == IDN_BROADCAST_ALLNETID) {
4948 		/*
4949 		 * We're doing a broadcast.  Need to set
4950 		 * up IDN netaddr's one at a time.
4951 		 * We set the ethernet destination to the same
4952 		 * instance as the sending address.  The instance
4953 		 * numbers effectively represent subnets.
4954 		 */
4955 		dst_netaddr.net.netid = dp->dnetid;
4956 
4957 		(void) idndl_domain_etheraddr(dst_domid, channel,
4958 		    &ehp->ether_dhost);
4959 
4960 		if (dst_domid == idn.localid) {
4961 			mblk_t	*nmp;
4962 			/*
4963 			 * If this is a broadcast and going to
4964 			 * the local domain, then we need to make
4965 			 * a private copy of the message since
4966 			 * the current one will be reused when
4967 			 * transmitting to other domains.
4968 			 */
4969 			PR_DATA("%s:%d: dup broadcast msg for local domain\n",
4970 			    proc, dst_domid);
4971 			if ((nmp = copymsg(mp)) == NULL) {
4972 				/*
4973 				 * Couldn't get a duplicate copy.
4974 				 */
4975 				IDN_CHAN_UNLOCK_SEND(csp);
4976 				csp = NULL;
4977 				IDN_KSTAT_INC(sip, si_allocbfail);
4978 				IDN_KSTAT_INC(sip, si_noxmtbuf);
4979 				rv = IDNXMIT_DROP;
4980 				goto nocando;
4981 			}
4982 			mp = nmp;
4983 		}
4984 	}
4985 
4986 	if (dp->dnetid != dst_netaddr.net.netid) {
4987 		PR_DATA("%s:%d: dest netid (0x%x) != expected (0x%x)\n",
4988 		    proc, dst_domid, (uint_t)dst_netaddr.net.netid,
4989 		    (uint_t)dp->dnetid);
4990 		IDN_CHAN_UNLOCK_SEND(csp);
4991 		csp = NULL;
4992 		IDN_KSTAT_INC(sip, si_nolink);
4993 		IDN_KSTAT_INC(sip, si_macxmt_errors);
4994 		rv = IDNXMIT_DROP;
4995 		goto nocando;
4996 	}
4997 
4998 	if (dst_domid == idn.localid) {
4999 		int	lbrv;
5000 		/*
5001 		 * Sending to our local domain! Loopback.
5002 		 * Note that idn_send_data_loop returning 0
5003 		 * does not mean the message can now be freed.
5004 		 * We need to return (-1) so that caller doesn't
5005 		 * try to free mblk.
5006 		 */
5007 		IDN_CHAN_UNLOCK_SEND(csp);
5008 		rw_exit(&stp->ss_rwlock);
5009 		lbrv = idn_send_data_loopback(dst_netaddr, wq, mp);
5010 		rw_enter(&stp->ss_rwlock, RW_READER);
5011 		if (lbrv == 0) {
5012 			return (IDNXMIT_LOOP);
5013 		} else {
5014 			IDN_KSTAT_INC(sip, si_macxmt_errors);
5015 			return (IDNXMIT_DROP);
5016 		}
5017 	}
5018 
5019 	if (dp->dstate != IDNDS_CONNECTED) {
5020 		/*
5021 		 * Can't send data unless a link has already been
5022 		 * established with the target domain.  Normally,
5023 		 * a user cannot set the remote netaddr unless a
5024 		 * link has already been established, however it
5025 		 * is possible the connection may have become
5026 		 * disconnected since that time.
5027 		 */
5028 		IDN_CHAN_UNLOCK_SEND(csp);
5029 		csp = NULL;
5030 		IDN_KSTAT_INC(sip, si_linkdown);
5031 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5032 		rv = IDNXMIT_DROP;
5033 		goto nocando;
5034 	}
5035 
5036 	/*
5037 	 * Need to make sure the channel is active and that the
5038 	 * domain to which we're sending is allowed to receive stuff.
5039 	 */
5040 	if (!IDN_CHANNEL_IS_SEND_ACTIVE(csp)) {
5041 		int	not_active;
5042 		/*
5043 		 * See if we can activate channel.
5044 		 */
5045 		IDN_CHAN_UNLOCK_SEND(csp);
5046 		not_active = idn_activate_channel(CHANSET(channel),
5047 		    IDNCHAN_OPEN);
5048 		if (!not_active) {
5049 			/*
5050 			 * Only grab the lock for a recheck if we were
5051 			 * able to activate the channel.
5052 			 */
5053 			IDN_CHAN_LOCK_SEND(csp);
5054 		}
5055 		/*
5056 		 * Verify channel still active now that we have the lock.
5057 		 */
5058 		if (not_active || !IDN_CHANNEL_IS_SEND_ACTIVE(csp)) {
5059 			if (!not_active) {
5060 				/*
5061 				 * Only need to drop the lock if it was
5062 				 * acquired while we thought we had
5063 				 * activated the channel.
5064 				 */
5065 				IDN_CHAN_UNLOCK_SEND(csp);
5066 			}
5067 			ASSERT(!IDN_CHAN_SEND_IS_LOCKED(csp));
5068 			/*
5069 			 * Damn!   Must have went inactive during the window
5070 			 * before we regrabbed the send lock.  Oh well, can't
5071 			 * spend all day doing this, bail out.  Set csp to
5072 			 * NULL to prevent inprogress update at bottom.
5073 			 */
5074 			csp = NULL;
5075 			/*
5076 			 * Channel is not active, should not be used.
5077 			 */
5078 			PR_DATA("%s:%d: dest channel %d NOT ACTIVE\n",
5079 			    proc, dst_domid, channel);
5080 			IDN_KSTAT_INC(sip, si_linkdown);
5081 			rv = IDNXMIT_REQUEUE;
5082 			goto nocando;
5083 		}
5084 		ASSERT(IDN_CHAN_SEND_IS_LOCKED(csp));
5085 	}
5086 	/*
5087 	 * If we made it here then the channel is active
5088 	 * Make sure the target domain is registered to receive stuff,
5089 	 * i.e. we're still linked.
5090 	 */
5091 	if (!IDN_CHAN_DOMAIN_IS_REGISTERED(csp, dst_domid)) {
5092 		/*
5093 		 * If domain is not even registered with this channel
5094 		 * then we have no business being here.  Doesn't matter
5095 		 * whether it's active or not.
5096 		 */
5097 		PR_DATA("%s:%d: domain not registered with channel %d\n",
5098 		    proc, dst_domid, channel);
5099 		/*
5100 		 * Set csp to NULL to prevent in-progress update below.
5101 		 */
5102 		IDN_CHAN_UNLOCK_SEND(csp);
5103 		csp = NULL;
5104 		IDN_KSTAT_INC(sip, si_linkdown);
5105 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5106 		rv = IDNXMIT_DROP;
5107 		goto nocando;
5108 	}
5109 
5110 	IDN_CHAN_SEND_INPROGRESS(csp);
5111 	IDN_CHAN_UNLOCK_SEND(csp);
5112 
5113 	/*
5114 	 * Find a target cpu to send interrupt to if
5115 	 * it becomes necessary (i.e. remote channel
5116 	 * server is idle).
5117 	 */
5118 	cpuindex = dp->dcpuindex;
5119 
5120 	/*
5121 	 * dcpuindex is atomically incremented, but other than
5122 	 * that is not well protected and that's okay.  The
5123 	 * intention is to simply spread around the interrupts
5124 	 * at the destination domain, however we don't have to
5125 	 * anal about it.  If we hit the same cpu multiple times
5126 	 * in a row that's okay, it will only be for a very short
5127 	 * period anyway before the cpuindex is incremented
5128 	 * to the next cpu.
5129 	 */
5130 	if (cpuindex < NCPU) {
5131 		ATOMIC_INC(dp->dcpuindex);
5132 	}
5133 	if (dp->dcpuindex >= NCPU)
5134 		dp->dcpuindex = 0;
5135 
5136 	IDN_ASSIGN_DCPU(dp, cpuindex);
5137 
5138 #ifdef XXX_DLPI_UNFRIENDLY
5139 	{
5140 		ushort_t	dstport = (ushort_t)dp->dcpu;
5141 
5142 		/*
5143 		 * XXX
5144 		 * This is not DLPI friendly, but we need some way
5145 		 * of distributing our XDC interrupts to the cpus
5146 		 * on the remote domain in a relatively random fashion
5147 		 * while trying to remain constant for an individual
5148 		 * network connection.  Don't want the target network
5149 		 * appl pinging around cpus thrashing the caches.
5150 		 * So, we'll pick target cpus based on the destination
5151 		 * TCP/IP port (socket).  The (simple) alternative to
5152 		 * this is to simply send all messages destined for
5153 		 * particular domain to the same cpu (dcpu), but
5154 		 * will lower our bandwidth and introduce a lot of
5155 		 * contention on that target cpu.
5156 		 */
5157 		if (ehp->ether_type == ETHERTYPE_IP) {
5158 			ipha_t	*ipha;
5159 			uchar_t	*dstporta;
5160 			int	hdr_length;
5161 			mblk_t	*nmp = mp;
5162 			uchar_t	*rptr = mp->b_rptr +
5163 			    sizeof (struct ether_header);
5164 			if (nmp->b_wptr <= rptr) {
5165 				/*
5166 				 * Only the ethernet header was contained
5167 				 * in the first block.  Check for the
5168 				 * next packet.
5169 				 */
5170 				if ((nmp = mp->b_cont) != NULL)
5171 					rptr = nmp->b_rptr;
5172 			}
5173 			/*
5174 			 * If we still haven't found the IP header packet
5175 			 * then don't bother.  Can't search forever.
5176 			 */
5177 			if (nmp &&
5178 			    ((nmp->b_wptr - rptr) >= IP_SIMPLE_HDR_LENGTH)) {
5179 				ipha = (ipha_t *)ALIGN32(rptr);
5180 
5181 				ASSERT(DB_TYPE(mp) == M_DATA);
5182 				hdr_length = IPH_HDR_LENGTH(ipha);
5183 
5184 				switch (ipha->ipha_protocol) {
5185 				case IPPROTO_UDP:
5186 				case IPPROTO_TCP:
5187 					/*
5188 					 * TCP/UDP Protocol Header (1st word)
5189 					 * 0	    15,16	31
5190 					 * -----------------------
5191 					 * | src port | dst port |
5192 					 * -----------------------
5193 					 */
5194 					dstporta = (uchar_t *)ipha + hdr_length;
5195 					netports = *(uint_t *)dstporta;
5196 					dstporta += 2;
5197 					dstport  = *(ushort_t *)dstporta;
5198 					break;
5199 				default:
5200 					break;
5201 				}
5202 			}
5203 
5204 		}
5205 		IDN_ASSIGN_DCPU(dp, dstport);
5206 
5207 		PR_DATA("%s:%d: (dstport %d) assigned %d\n",
5208 		    proc, dst_domid, (int)dstport, dp->dcpu);
5209 	}
5210 #endif /* XXX_DLPI_UNFRIENDLY */
5211 
5212 	data_rptr = mp->b_rptr;
5213 
5214 	ASSERT(dp->dcpu != IDN_NIL_DCPU);
5215 
5216 	ASSERT(idn_domain[dst_domid].dmbox.m_send);
5217 
5218 	retry_reclaim = 1;
5219 retry:
5220 	if ((dp->dio >= IDN_RECLAIM_MIN) || dp->diowanted) {
5221 		int	reclaim_req;
5222 		/*
5223 		 * Reclaim however many outstanding buffers
5224 		 * there are up to IDN_RECLAIM_MAX if it's set.
5225 		 */
5226 		reclaim_req = dp->diowanted ? -1 : IDN_RECLAIM_MAX ?
5227 		    MIN(dp->dio, IDN_RECLAIM_MAX) : dp->dio;
5228 		(void) idn_reclaim_mboxdata(dst_domid, channel,
5229 		    reclaim_req);
5230 	}
5231 
5232 	if (dp->dio >= IDN_WINDOW_EMAX) {
5233 
5234 		if (lock_try(&dp->diocheck)) {
5235 			IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
5236 			    idn_msg_waittime[IDNP_DATA],
5237 			    &mt.mt_cookie);
5238 			/*
5239 			 * We have exceeded the minimum window for
5240 			 * outstanding I/O buffers to this domain.
5241 			 * Need to start the MSG timer to check for
5242 			 * possible response from remote domain.
5243 			 * The remote domain may be hung.  Send a
5244 			 * wakeup!  Specify all channels for given
5245 			 * domain since we don't know precisely which
5246 			 * is backed up (dio is global).
5247 			 */
5248 			IDNXDC(dst_domid, &mt,
5249 			    (uint_t)dst_netaddr.net.chan, 0, 0, 0);
5250 		}
5251 
5252 		/*
5253 		 * Yikes!  We have exceeded the maximum window
5254 		 * which means no more packets going to remote
5255 		 * domain until he frees some up.
5256 		 */
5257 		IDN_KSTAT_INC(sip, si_txmax);
5258 		IDN_KSTAT_INC(sip, si_macxmt_errors);
5259 		rv = IDNXMIT_DROP;
5260 		goto nocando;
5261 	}
5262 
5263 	/*
5264 	 * Allocate a SMR I/O buffer and send it.
5265 	 */
5266 	if (msglen == 0) {
5267 		/*
5268 		 * A zero length messages is effectively a signal
5269 		 * to just send an interrupt to the remote domain.
5270 		 */
5271 		IDN_MSGTIMER_START(dst_domid, IDNP_DATA, 0,
5272 		    idn_msg_waittime[IDNP_DATA],
5273 		    &mt.mt_cookie);
5274 		IDNXDC(dst_domid, &mt,
5275 		    (uint_t)dst_netaddr.net.chan, 0, 0, 0);
5276 	}
5277 	for (; (msglen > 0) && mp; msglen -= xfersize) {
5278 		int		xrv;
5279 		smr_offset_t	bufoffset;
5280 #ifdef DEBUG
5281 		int		n_xfersize;
5282 #endif /* DEBUG */
5283 
5284 		ASSERT(msglen <= IDN_DATA_SIZE);
5285 		xfersize = msglen;
5286 
5287 		serrno = smr_buf_alloc(dst_domid, xfersize, &iobufp);
5288 		if (serrno) {
5289 			PR_DATA("%s:%d: failed to alloc SMR I/O buffer "
5290 			    "(serrno = %d)\n",
5291 			    proc, dst_domid, serrno);
5292 			/*
5293 			 * Failure is either due to a timeout waiting
5294 			 * for the master to give us a slab, OR the
5295 			 * local domain exhausted its slab quota!
5296 			 * In either case we'll have to bail from
5297 			 * here and let higher layers decide what
5298 			 * to do.
5299 			 * We also could have had locking problems.
5300 			 * A negative serrno indicates we lost the lock
5301 			 * on dst_domid, so no need in dropping lock.
5302 			 */
5303 
5304 			if (lock_try(&dp->diowanted) && retry_reclaim) {
5305 				/*
5306 				 * We were the first to acquire the
5307 				 * lock indicating that it wasn't
5308 				 * set on entry to idn_send_data.
5309 				 * So, let's go back and see if we
5310 				 * can't reclaim some buffers and
5311 				 * try again.
5312 				 * It's very likely diowanted will be
5313 				 * enough to prevent us from looping
5314 				 * on retrying here, however to protect
5315 				 * against the small window where a
5316 				 * race condition might exist, we use
5317 				 * the retry_reclaim flag so that we
5318 				 * don't retry more than once.
5319 				 */
5320 				retry_reclaim = 0;
5321 				goto retry;
5322 			}
5323 
5324 			rv = (serrno > 0) ? serrno : -serrno;
5325 			IDN_KSTAT_INC(sip, si_notbufs);
5326 			IDN_KSTAT_INC(sip, si_noxmtbuf);	/* MIB II */
5327 			switch (rv) {
5328 			case ENOMEM:
5329 			case EBUSY:
5330 			case ENOLCK:
5331 			case ETIMEDOUT:
5332 			case EDQUOT:
5333 				/*
5334 				 * These are all transient conditions
5335 				 * which should be recoverable over
5336 				 * time.
5337 				 */
5338 				rv = IDNXMIT_REQUEUE;
5339 				break;
5340 
5341 			default:
5342 				rv = IDNXMIT_DROP;
5343 				break;
5344 			}
5345 			goto nocando;
5346 		}
5347 
5348 		lock_clear(&dp->diowanted);
5349 
5350 		hdrp = IDN_BUF2HDR(iobufp);
5351 		bufoffset = (smr_offset_t)IDN_ALIGNPTR(sizeof (smr_pkthdr_t),
5352 		    data_rptr);
5353 		/*
5354 		 * If the alignment of bufoffset took us pass the
5355 		 * length of a smr_pkthdr_t then we need to possibly
5356 		 * lower xfersize since it was calulated based on
5357 		 * a perfect alignment.  However, if we're in DLPI
5358 		 * mode then shouldn't be necessary since the length
5359 		 * of the incoming packet (mblk) should have already
5360 		 * taken into consideration this possible adjustment.
5361 		 */
5362 #ifdef DEBUG
5363 		if (bufoffset != sizeof (smr_pkthdr_t))
5364 			PR_DATA("%s:%d: offset ALIGNMENT (%lu -> %u) "
5365 			    "(data_rptr = %p)\n",
5366 			    proc, dst_domid, sizeof (smr_pkthdr_t),
5367 			    bufoffset, (void *)data_rptr);
5368 
5369 		n_xfersize = MIN(xfersize, (IDN_SMR_BUFSIZE - bufoffset));
5370 		if (xfersize != n_xfersize) {
5371 			PR_DATA("%s:%d: xfersize ADJUST (%d -> %d)\n",
5372 			    proc, dst_domid, xfersize, n_xfersize);
5373 			cmn_err(CE_WARN, "%s: ERROR (xfersize = %d, > "
5374 			    "bufsize(%d)-bufoffset(%d) = %d)",
5375 			    proc, xfersize, IDN_SMR_BUFSIZE,
5376 			    bufoffset,
5377 			    IDN_SMR_BUFSIZE - bufoffset);
5378 		}
5379 #endif /* DEBUG */
5380 		xfersize = MIN(xfersize, (int)(IDN_SMR_BUFSIZE - bufoffset));
5381 
5382 		iodatap = IDN_BUF2DATA(iobufp, bufoffset);
5383 		mp = idn_fill_buffer(iodatap, xfersize, mp, &data_rptr);
5384 
5385 		hdrp->b_netaddr  = dst_netaddr.netaddr;
5386 		hdrp->b_netports = netports;
5387 		hdrp->b_offset   = bufoffset;
5388 		hdrp->b_length   = xfersize;
5389 		hdrp->b_next	 = IDN_NIL_SMROFFSET;
5390 		hdrp->b_rawio	 = 0;
5391 		hdrp->b_cksum    = IDN_CKSUM_PKT(hdrp);
5392 
5393 		xrv = idn_send_mboxdata(dst_domid, sip, channel, iobufp);
5394 		if (xrv) {
5395 			/*
5396 			 * Reclaim packet.
5397 			 * Return error on this packet so it can be retried
5398 			 * (putbq).  Note that it should be safe to assume
5399 			 * that this for-loop is only executed once when in
5400 			 * DLPI mode and so no need to worry about fractured
5401 			 * mblk packet.
5402 			 */
5403 			PR_DATA("%s:%d: DATA XFER to chan %d FAILED "
5404 			    "(ret=%d)\n",
5405 			    proc, dst_domid, channel, xrv);
5406 			(void) smr_buf_free(dst_domid, iobufp, xfersize);
5407 
5408 			PR_DATA("%s:%d: (line %d) dec(dio) -> %d\n",
5409 			    proc, dst_domid, __LINE__, dp->dio);
5410 
5411 			rv = IDNXMIT_DROP;
5412 			IDN_KSTAT_INC(sip, si_macxmt_errors);
5413 			goto nocando;
5414 		} else {
5415 			pktcnt++;
5416 			/*
5417 			 * Packet will get freed on a subsequent send
5418 			 * when we reclaim buffers that the receivers
5419 			 * has finished consuming.
5420 			 */
5421 		}
5422 	}
5423 
5424 #ifdef DEBUG
5425 	if (pktcnt > 1)
5426 		cmn_err(CE_WARN,
5427 		    "%s: ERROR: sent multi-pkts (%d), len = %ld",
5428 		    proc, pktcnt, orig_msglen);
5429 #endif /* DEBUG */
5430 
5431 	PR_DATA("%s:%d: SENT %d packets (%d @ 0x%x)\n",
5432 	    proc, dst_domid, pktcnt, dst_netaddr.net.chan,
5433 	    dst_netaddr.net.netid);
5434 
5435 	IDN_CHAN_LOCK_SEND(csp);
5436 	IDN_CHAN_SEND_DONE(csp);
5437 	IDN_CHAN_UNLOCK_SEND(csp);
5438 
5439 	return (IDNXMIT_OKAY);
5440 
5441 nocando:
5442 
5443 	if (csp) {
5444 		IDN_CHAN_LOCK_SEND(csp);
5445 		IDN_CHAN_SEND_DONE(csp);
5446 		IDN_CHAN_UNLOCK_SEND(csp);
5447 	}
5448 
5449 	if (rv == IDNXMIT_REQUEUE) {
5450 		/*
5451 		 * Better kick off monitor to check when
5452 		 * it's ready to reenable the queues for
5453 		 * this channel.
5454 		 */
5455 		idn_xmit_monitor_kickoff(channel);
5456 	}
5457 
5458 	return (rv);
5459 }
5460 
5461 /*
5462  * Function to support local loopback testing of IDN driver.
5463  * Primarily geared towards measuring stream-head and IDN driver
5464  * overhead with respect to data messages.  Setting idn_strhead_only
5465  * allows routine to focus on stream-head overhead by simply putting
5466  * the message straight to the 'next' queue of the destination
5467  * read-queue.  Current implementation puts the message directly to
5468  * the read-queue thus sending the message right back to the IDN driver
5469  * as though the data came in off the wire.  No need to worry about
5470  * any IDN layers attempting to ack data as that's normally handled
5471  * by idnh_recv_data.
5472  *
5473  * dst_netaddr = destination port-n-addr on local domain.
5474  * wq          = write queue from whence message came.
5475  * mp          = the (data-only) message.
5476  *
5477  * Returns 0		Indicates data handled.
5478  *	   errno	EAGAIN indicates data can be retried.
5479  *			Other errno's indicate failure to handle.
5480  */
5481 static int
idn_send_data_loopback(idn_netaddr_t dst_netaddr,queue_t * wq,mblk_t * mp)5482 idn_send_data_loopback(idn_netaddr_t dst_netaddr, queue_t *wq, mblk_t *mp)
5483 {
5484 	register struct idnstr	*stp;
5485 	struct idn	*sip;
5486 	int		rv = 0;
5487 	procname_t	proc = "idn_send_data_loopback";
5488 
5489 	if (dst_netaddr.net.netid != idn_domain[idn.localid].dnetid) {
5490 		PR_DATA("%s: dst_netaddr.net.netid 0x%x != local 0x%x\n",
5491 		    proc, dst_netaddr.net.netid,
5492 		    idn_domain[idn.localid].dnetid);
5493 		rv = EADDRNOTAVAIL;
5494 		goto done;
5495 	}
5496 	stp = (struct idnstr *)wq->q_ptr;
5497 	if (!stp || !stp->ss_rq) {
5498 		rv = EDESTADDRREQ;
5499 		goto done;
5500 	}
5501 	sip = stp->ss_sip;
5502 
5503 	idndl_read(sip, mp);
5504 	rv = 0;
5505 
5506 done:
5507 	return (rv);
5508 }
5509 
5510 /*
5511  * Fill bufp with as much data as possible from the message pointed
5512  * to by mp up to size bytes.
5513  * Save our current read pointer in the variable parameter (data_rptrp)
5514  * so we know where to start on the next go around.  Don't want to
5515  * bump the actual b_rptr in the mblk because the mblk may need to
5516  * be reused, e.g. broadcast.
5517  * Return the mblk pointer to the position we had to stop.
5518  */
5519 static mblk_t *
idn_fill_buffer(caddr_t bufp,int size,mblk_t * mp,uchar_t ** data_rptrp)5520 idn_fill_buffer(caddr_t bufp, int size, mblk_t *mp, uchar_t **data_rptrp)
5521 {
5522 	int	copysize;
5523 
5524 	ASSERT(bufp && size);
5525 
5526 	if (mp == NULL)
5527 		return (NULL);
5528 
5529 	while ((size > 0) && mp) {
5530 
5531 		copysize = MIN(mp->b_wptr - (*data_rptrp), size);
5532 
5533 		if (copysize > 0) {
5534 			/*
5535 			 * If there's data to copy, do it.
5536 			 */
5537 			bcopy((*data_rptrp), bufp, copysize);
5538 			(*data_rptrp) += copysize;
5539 			bufp += copysize;
5540 			size -= copysize;
5541 		}
5542 		if (mp->b_wptr <= (*data_rptrp)) {
5543 			/*
5544 			 * If we emptied the mblk, then
5545 			 * move on to the next one.
5546 			 */
5547 			for (mp = mp->b_cont;
5548 			    mp && (mp->b_datap->db_type != M_DATA);
5549 			    mp = mp->b_cont)
5550 				;
5551 			if (mp)
5552 				*data_rptrp = mp->b_rptr;
5553 		}
5554 	}
5555 	return (mp);
5556 }
5557 
5558 /*
5559  * Messages received here do NOT arrive on a stream, but are
5560  * instead handled via the idn_protocol_servers.  This routine
5561  * is effectively the job processor for the protocol servers.
5562  */
5563 static void
idn_recv_proto(idn_protomsg_t * hp)5564 idn_recv_proto(idn_protomsg_t *hp)
5565 {
5566 	int		domid, cpuid;
5567 	int		sync_lock = 0;
5568 	idn_domain_t	*dp;
5569 	register uint_t	mtype;
5570 	register uint_t	msgtype, acktype;
5571 	idn_msgtype_t	mt;
5572 	ushort_t	dcookie, tcookie;
5573 	procname_t	proc = "idn_recv_proto";
5574 
5575 
5576 	if (idn.state == IDNGS_IGNORE) {
5577 		/*
5578 		 * Fault injection to simulate non-responsive domain.
5579 		 */
5580 		return;
5581 	}
5582 
5583 	domid   = hp->m_domid;
5584 	cpuid   = hp->m_cpuid;
5585 	msgtype = hp->m_msgtype;
5586 	acktype = hp->m_acktype;
5587 	dcookie = IDN_DCOOKIE(hp->m_cookie);
5588 	tcookie = IDN_TCOOKIE(hp->m_cookie);
5589 	/*
5590 	 * msgtype =	Is the type of message we received,
5591 	 *		e.g. nego, ack, nego+ack, etc.
5592 	 *
5593 	 * acktype =	If we received a pure ack or nack
5594 	 *		then this variable is set to the
5595 	 *		type of message that was ack/nack'd.
5596 	 */
5597 	if ((mtype = msgtype & IDNP_MSGTYPE_MASK) == 0) {
5598 		/*
5599 		 * Received a pure ack/nack.
5600 		 */
5601 		mtype = acktype & IDNP_MSGTYPE_MASK;
5602 	}
5603 
5604 	if (!VALID_MSGTYPE(mtype)) {
5605 		PR_PROTO("%s:%d: ERROR: invalid message type (0x%x)\n",
5606 		    proc, domid, mtype);
5607 		return;
5608 	}
5609 	if (!VALID_CPUID(cpuid)) {
5610 		PR_PROTO("%s:%d: ERROR: invalid cpuid (%d)\n",
5611 		    proc, domid, cpuid);
5612 		return;
5613 	}
5614 
5615 	/*
5616 	 * No pure data packets should reach this level.
5617 	 * Data+ack messages will reach here, but only
5618 	 * for the purpose of stopping the timer which
5619 	 * happens by default when this routine is called.
5620 	 */
5621 	ASSERT(msgtype != IDNP_DATA);
5622 
5623 	/*
5624 	 * We should never receive a request from ourself,
5625 	 * except for commands in the case of broadcasts!
5626 	 */
5627 	if ((domid == idn.localid) && (mtype != IDNP_CMD)) {
5628 		char	str[15];
5629 
5630 		inum2str(hp->m_msgtype, str);
5631 
5632 		cmn_err(CE_WARN,
5633 		    "IDN: 214: received message (%s[0x%x]) from self "
5634 		    "(domid %d)",
5635 		    str, hp->m_msgtype, domid);
5636 		return;
5637 	}
5638 
5639 	IDN_SYNC_LOCK();
5640 	/*
5641 	 * Set a flag indicating whether we really need
5642 	 * SYNC-LOCK.  We'll drop it in a little bit if
5643 	 * we really don't need it.
5644 	 */
5645 	switch (mtype) {
5646 	case IDNP_CON:
5647 	case IDNP_FIN:
5648 	case IDNP_NEGO:
5649 		sync_lock = 1;
5650 		break;
5651 
5652 	default:
5653 		break;
5654 	}
5655 
5656 	dp = &idn_domain[domid];
5657 	IDN_DLOCK_EXCL(domid);
5658 
5659 	/*
5660 	 * The only messages we do _not_ check the cookie are:
5661 	 *	nego
5662 	 *	nego+ack
5663 	 *	fin	 - if received cookie is 0.
5664 	 *	fin+ack	 - if received cookie is 0.
5665 	 *	ack/fin	 - if received cookie is 0.
5666 	 *	nack/fin - if received cookie is 0.
5667 	 */
5668 	if (((msgtype & IDNP_MSGTYPE_MASK) != IDNP_NEGO) &&
5669 	    ((mtype != IDNP_FIN) || (dcookie && dp->dcookie_recv))) {
5670 		if (dp->dcookie_recv != dcookie) {
5671 			dp->dcookie_errcnt++;
5672 			if (dp->dcookie_err == 0) {
5673 				/*
5674 				 * Set cookie error to prevent a
5675 				 * possible flood of bogus cookies
5676 				 * and thus error messages.
5677 				 */
5678 				dp->dcookie_err = 1;
5679 				cmn_err(CE_WARN,
5680 				    "IDN: 215: invalid cookie (0x%x) "
5681 				    "for message (0x%x) from domain %d",
5682 				    dcookie, hp->m_msgtype, domid);
5683 
5684 				PR_PROTO("%s:%d: received cookie (0x%x), "
5685 				    "expected (0x%x) [errcnt = %d]\n",
5686 				    proc, domid, dcookie,
5687 				    dp->dcookie_recv, dp->dcookie_errcnt);
5688 			}
5689 			IDN_DUNLOCK(domid);
5690 			IDN_SYNC_UNLOCK();
5691 			return;
5692 		}
5693 	}
5694 	dp->dcookie_err = 0;
5695 	IDN_GLOCK_EXCL();
5696 
5697 	idn_clear_awol(domid);
5698 
5699 	IDN_GUNLOCK();
5700 	if (!sync_lock)		/* really don't need SYNC-LOCK past here */
5701 		IDN_SYNC_UNLOCK();
5702 
5703 	/*
5704 	 * Stop any timers that may have been outstanding for
5705 	 * this domain, for this particular message type.
5706 	 * Note that CFG timers are directly managed by
5707 	 * config recv/send code.
5708 	 */
5709 	if ((mtype != IDNP_CFG) && (msgtype & IDNP_ACKNACK_MASK) && tcookie) {
5710 		IDN_MSGTIMER_STOP(domid, mtype, tcookie);
5711 	}
5712 
5713 	/*
5714 	 * Keep track of the last cpu to send us a message.
5715 	 * If the domain has not yet been assigned, we'll need
5716 	 * this cpuid in order to send back a respond.
5717 	 */
5718 	dp->dcpu_last = cpuid;
5719 
5720 	mt.mt_mtype = (ushort_t)msgtype;
5721 	mt.mt_atype = (ushort_t)acktype;
5722 	mt.mt_cookie = tcookie;
5723 
5724 	switch (mtype) {
5725 	case IDNP_NEGO:
5726 		(void) idn_recv_nego(domid, &mt, hp->m_xargs, dcookie);
5727 		break;
5728 
5729 	case IDNP_CFG:
5730 		idn_recv_config(domid, &mt, hp->m_xargs);
5731 		break;
5732 
5733 	case IDNP_CON:
5734 		(void) idn_recv_con(domid, &mt, hp->m_xargs);
5735 		break;
5736 
5737 	case IDNP_FIN:
5738 		(void) idn_recv_fin(domid, &mt, hp->m_xargs);
5739 		break;
5740 
5741 	case IDNP_CMD:
5742 		idn_recv_cmd(domid, &mt, hp->m_xargs);
5743 		break;
5744 
5745 	case IDNP_DATA:
5746 		ASSERT(msgtype & IDNP_ACKNACK_MASK);
5747 		/*
5748 		 * When doing the fast track we simply process
5749 		 * possible nack error conditions.  The actual
5750 		 * processing of the SMR data buffer is taken
5751 		 * care of in idnh_recv_dataack.  When NOT doing
5752 		 * the fast track, we do all the processing here
5753 		 * in the protocol server.
5754 		 */
5755 		(void) idn_recv_data(domid, &mt, hp->m_xargs);
5756 		break;
5757 
5758 	default:
5759 		/*
5760 		 * Should be receiving 0 inum and 0 acknack.
5761 		 */
5762 #ifdef DEBUG
5763 		cmn_err(CE_PANIC,
5764 #else /* DEBUG */
5765 		    cmn_err(CE_WARN,
5766 #endif /* DEBUG */
5767 			/* CSTYLED */
5768 			"IDN: 216: (0x%x)msgtype/(0x%x)acktype rcvd from "
5769 			/* CSTYLED */
5770 			"domain %d", msgtype, acktype, domid);
5771 		break;
5772 	}
5773 
5774 	IDN_DUNLOCK(domid);
5775 	/*
5776 	 * All receiving routines are responsible for dropping drwlock.
5777 	 */
5778 
5779 	if (sync_lock)
5780 		IDN_SYNC_UNLOCK();
5781 }
5782 
5783 /*
5784  * Once the CONFIG state is hit we immediately blast out all
5785  * of our config info.  This guarantees that the CONFIG state
5786  * effectively signifies that the sender has sent _all_ of
5787  * their config info.
5788  */
5789 static void
idn_send_config(int domid,int phase)5790 idn_send_config(int domid, int phase)
5791 {
5792 	idn_domain_t	*dp;
5793 	int		rv;
5794 	clock_t		cfg_waittime = idn_msg_waittime[IDNP_CFG];
5795 	procname_t	proc = "idn_send_config";
5796 
5797 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5798 
5799 	dp = &idn_domain[domid];
5800 
5801 	ASSERT(dp->dstate == IDNDS_CONFIG);
5802 
5803 	if (phase == 1) {
5804 		/*
5805 		 * Reset stuff in dtmp to 0:
5806 		 *	dcfgphase
5807 		 *	dcksum
5808 		 *	dncfgitems
5809 		 *	dmaxnets
5810 		 *	dmboxpernet
5811 		 */
5812 		dp->dtmp = 0;
5813 	}
5814 
5815 	if (dp->dcfgsnddone) {
5816 		if (!dp->dcfgrcvdone) {
5817 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5818 			    cfg_waittime, NULL);
5819 		}
5820 		return;
5821 	}
5822 
5823 	IDN_DLOCK_SHARED(idn.localid);
5824 
5825 	PR_PROTO("%s:%d: sending %s config (phase %d)\n",
5826 	    proc, domid,
5827 	    idn_domain[idn.localid].dvote.v.master ? "MASTER" : "SLAVE",
5828 	    phase);
5829 
5830 	if (idn_domain[idn.localid].dvote.v.master)
5831 		rv = idn_send_master_config(domid, phase);
5832 	else
5833 		rv = idn_send_slave_config(domid, phase);
5834 
5835 	IDN_DUNLOCK(idn.localid);
5836 
5837 	if (rv >= 0) {
5838 
5839 		if (rv == 1) {
5840 			dp->dcfgsnddone = 1;
5841 			PR_PROTO("%s:%d: SEND config DONE\n", proc, domid);
5842 			if (!dp->dcfgrcvdone) {
5843 				IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5844 				    cfg_waittime, NULL);
5845 			}
5846 		} else {
5847 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
5848 			    cfg_waittime, NULL);
5849 		}
5850 	}
5851 }
5852 
5853 /*
5854  * Clear out the mailbox table.
5855  * NOTE: This routine touches the SMR.
5856  */
5857 static void
idn_reset_mboxtbl(idn_mboxtbl_t * mtp)5858 idn_reset_mboxtbl(idn_mboxtbl_t *mtp)
5859 {
5860 	int		qi;
5861 	idn_mboxmsg_t	*mp = &mtp->mt_queue[0];
5862 
5863 	qi = 0;
5864 	do {
5865 		mp[qi].ms_bframe = 0;
5866 		mp[qi].ms_owner = 0;
5867 		mp[qi].ms_flag = 0;
5868 		IDN_MMBOXINDEX_INC(qi);
5869 	} while (qi);
5870 }
5871 
5872 static int
idn_get_mbox_config(int domid,int * mindex,smr_offset_t * mtable,smr_offset_t * mdomain)5873 idn_get_mbox_config(int domid, int *mindex, smr_offset_t *mtable,
5874     smr_offset_t *mdomain)
5875 {
5876 	idn_domain_t	*dp, *ldp;
5877 
5878 	dp = &idn_domain[domid];
5879 	ldp = &idn_domain[idn.localid];
5880 
5881 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5882 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
5883 	ASSERT(IDN_GET_MASTERID() != IDN_NIL_DOMID);
5884 
5885 	/*
5886 	 * Get SMR offset of receive mailbox assigned
5887 	 * to respective domain.  If I'm a slave then
5888 	 * my dmbox.m_tbl will not have been assigned yet.
5889 	 * Instead of sending the actual offset I send
5890 	 * the master his assigned index.  Since the
5891 	 * master knows what offset it will assign to
5892 	 * me he can determine his assigned (recv) mailbox
5893 	 * based on the offset and given index.  The local
5894 	 * domain can also use this information once the
5895 	 * dmbox.m_tbl is received to properly assign the
5896 	 * correct mbox offset to the master.
5897 	 */
5898 	if (ldp->dmbox.m_tbl == NULL) {
5899 		/*
5900 		 * Local domain has not yet been assigned a
5901 		 * (recv) mailbox table.  This must be the
5902 		 * initial connection of this domain.
5903 		 */
5904 		ASSERT(dp->dvote.v.master && !ldp->dvote.v.master);
5905 		ASSERT(mindex);
5906 		*mindex = domid;
5907 	} else {
5908 		idn_mboxtbl_t	*mtp;
5909 
5910 		mtp = IDN_MBOXTBL_PTR(ldp->dmbox.m_tbl, domid);
5911 
5912 		ASSERT(mdomain);
5913 		*mdomain = IDN_ADDR2OFFSET(mtp);
5914 
5915 		if (ldp->dvote.v.master) {
5916 			/*
5917 			 * Need to calculate mailbox table to
5918 			 * assign to the given domain.  Since
5919 			 * I'm the master his mailbox is in
5920 			 * the (all-domains) mailbox table.
5921 			 */
5922 			mtp = IDN_MBOXAREA_BASE(idn.mboxarea, domid);
5923 			ASSERT(mtable);
5924 			*mtable = IDN_ADDR2OFFSET(mtp);
5925 
5926 			dp->dmbox.m_tbl = mtp;
5927 		}
5928 	}
5929 
5930 	return (0);
5931 }
5932 
5933 /*
5934  * RETURNS:
5935  *	1	Unexpected/unnecessary phase.
5936  *	0	Successfully handled, timer needed.
5937  */
5938 static int
idn_send_master_config(int domid,int phase)5939 idn_send_master_config(int domid, int phase)
5940 {
5941 	idn_cfgsubtype_t	cfg_subtype;
5942 	int		rv = 0;
5943 	idn_domain_t	*dp, *ldp;
5944 	idn_msgtype_t	mt;
5945 	int		nmcadr;
5946 	uint_t		barpfn, larpfn;
5947 	uint_t		cpus_u32, cpus_l32;
5948 	uint_t		mcadr[3];
5949 	smr_offset_t	mbox_table, mbox_domain;
5950 	register int	b, p, m;
5951 	procname_t	proc = "idn_send_master_config";
5952 
5953 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
5954 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
5955 
5956 	dp = &idn_domain[domid];
5957 	ldp = &idn_domain[idn.localid];
5958 
5959 	ASSERT(dp->dstate == IDNDS_CONFIG);
5960 	ASSERT(dp->dvote.v.master == 0);
5961 	ASSERT(ldp->dvote.v.master == 1);
5962 
5963 	mt.mt_mtype = IDNP_CFG;
5964 	mt.mt_atype = 0;
5965 	mt.mt_cookie = 0;
5966 	m = 0;
5967 	mcadr[0] = mcadr[1] = mcadr[2] = 0;
5968 	cfg_subtype.val = 0;
5969 
5970 	switch (phase) {
5971 
5972 	case 1:
5973 		mbox_table = mbox_domain = IDN_NIL_SMROFFSET;
5974 		(void) idn_get_mbox_config(domid, NULL, &mbox_table,
5975 		    &mbox_domain);
5976 		/*
5977 		 * ----------------------------------------------------
5978 		 * Send: SLABSIZE, DATAMBOX.DOMAIN, DATAMBOX.TABLE
5979 		 * ----------------------------------------------------
5980 		 */
5981 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
5982 		    IDNCFGARG_SIZE_SLAB);
5983 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
5984 		    IDNCFGARG_DATAMBOX_DOMAIN);
5985 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
5986 		    IDNCFGARG_DATAMBOX_TABLE);
5987 		cfg_subtype.info.num = 3;
5988 		cfg_subtype.info.phase = phase;
5989 		dp->dcfgphase = phase;
5990 
5991 		ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
5992 		ASSERT(mbox_table != IDN_NIL_SMROFFSET);
5993 
5994 		PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), "
5995 		    "DATAMBOX.DOMAIN (0x%x), DATAMBOX.TABLE (0x%x)\n",
5996 		    proc, domid, phase, IDN_SLAB_BUFCOUNT, mbox_domain,
5997 		    mbox_table);
5998 
5999 		IDNXDC(domid, &mt, cfg_subtype.val, IDN_SLAB_BUFCOUNT,
6000 		    mbox_domain, mbox_table);
6001 		break;
6002 
6003 	case 2:
6004 		barpfn = idn.smr.locpfn;
6005 		larpfn = barpfn + (uint_t)btop(MB2B(IDN_SMR_SIZE));
6006 		/*
6007 		 * ----------------------------------------------------
6008 		 * Send: NETID, BARLAR
6009 		 * ----------------------------------------------------
6010 		 */
6011 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
6012 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_BARLAR,
6013 		    IDNCFGARG_BARLAR_BAR);
6014 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_BARLAR,
6015 		    IDNCFGARG_BARLAR_LAR);
6016 		cfg_subtype.info.num = 3;
6017 		cfg_subtype.info.phase = phase;
6018 		dp->dcfgphase = phase;
6019 
6020 		PR_PROTO("%s:%d:%d: sending NETID (%d), "
6021 		    "BARPFN/LARPFN (0x%x/0x%x)\n",
6022 		    proc, domid, phase, ldp->dnetid, barpfn, larpfn);
6023 
6024 		IDNXDC(domid, &mt, cfg_subtype.val,
6025 		    (uint_t)ldp->dnetid, barpfn, larpfn);
6026 		break;
6027 
6028 	case 3:
6029 		nmcadr = ldp->dhw.dh_nmcadr;
6030 		cpus_u32 = UPPER32_CPUMASK(ldp->dcpuset);
6031 		cpus_l32 = LOWER32_CPUMASK(ldp->dcpuset);
6032 		/*
6033 		 * ----------------------------------------------------
6034 		 * Send: CPUSET, NMCADR
6035 		 * ----------------------------------------------------
6036 		 */
6037 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_CPUSET,
6038 		    IDNCFGARG_CPUSET_UPPER);
6039 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
6040 		    IDNCFGARG_CPUSET_LOWER);
6041 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_NMCADR, 0);
6042 		cfg_subtype.info.num = 3;
6043 		cfg_subtype.info.phase = phase;
6044 		dp->dcfgphase = phase;
6045 
6046 		PR_PROTO("%s:%d:%d: sending CPUSET (0x%x.%x), NMCADR (%d)\n",
6047 		    proc, domid, phase, cpus_u32, cpus_l32, nmcadr);
6048 
6049 		IDNXDC(domid, &mt, cfg_subtype.val,
6050 		    cpus_u32, cpus_l32, nmcadr);
6051 		break;
6052 
6053 	case 4:
6054 		/*
6055 		 * ----------------------------------------------------
6056 		 * Send: BOARDSET, MTU, BUFSIZE
6057 		 * ----------------------------------------------------
6058 		 */
6059 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
6060 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
6061 		    IDNCFGARG_SIZE_MTU);
6062 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6063 		    IDNCFGARG_SIZE_BUF);
6064 		cfg_subtype.info.num = 3;
6065 		cfg_subtype.info.phase = phase;
6066 		dp->dcfgphase = phase;
6067 
6068 		PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
6069 		    "BUFSIZE (0x%x)\n", proc, domid, phase,
6070 		    ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6071 
6072 		IDNXDC(domid, &mt, cfg_subtype.val,
6073 		    ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6074 		break;
6075 
6076 	case 5:
6077 		/*
6078 		 * ----------------------------------------------------
6079 		 * Send: MAXNETS, MBOXPERNET, CKSUM
6080 		 * ----------------------------------------------------
6081 		 */
6082 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATASVR,
6083 		    IDNCFGARG_DATASVR_MAXNETS);
6084 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
6085 		    IDNCFGARG_DATASVR_MBXPERNET);
6086 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_OPTIONS,
6087 		    IDNCFGARG_CHECKSUM);
6088 		cfg_subtype.info.num = 3;
6089 		cfg_subtype.info.phase = phase;
6090 		dp->dcfgphase = phase;
6091 
6092 		PR_PROTO("%s:%d:%d: sending MAXNETS (%d), "
6093 		    "MBOXPERNET (%d), CKSUM (%d)\n",
6094 		    proc, domid, phase,
6095 		    IDN_MAX_NETS, IDN_MBOX_PER_NET,
6096 		    IDN_CHECKSUM);
6097 
6098 		IDNXDC(domid, &mt, cfg_subtype.val,
6099 		    IDN_MAX_NETS, IDN_MBOX_PER_NET, IDN_CHECKSUM);
6100 		break;
6101 
6102 	case 6:
6103 		/*
6104 		 * ----------------------------------------------------
6105 		 * Send: NWRSIZE (piggyback on MCADRs)
6106 		 * ----------------------------------------------------
6107 		 */
6108 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
6109 		    IDNCFGARG_SIZE_NWR);
6110 		mcadr[0] = IDN_NWR_SIZE;
6111 		m = 1;
6112 
6113 		/*FALLTHROUGH*/
6114 
6115 	default:	/* case 7 and above */
6116 		/*
6117 		 * ----------------------------------------------------
6118 		 * Send: MCADR's
6119 		 * ----------------------------------------------------
6120 		 * First need to figure how many we've already sent
6121 		 * based on what phase of CONFIG we're in.
6122 		 * ----------------------------------------------------
6123 		 */
6124 		if (phase > 6) {
6125 			p = ((phase - 7) * 3) + 2;
6126 			for (b = 0; (b < MAX_BOARDS) && (p > 0); b++)
6127 				if (ldp->dhw.dh_mcadr[b])
6128 					p--;
6129 		} else {
6130 			b = 0;
6131 		}
6132 
6133 		for (; (b < MAX_BOARDS) && (m < 3); b++) {
6134 			if (ldp->dhw.dh_mcadr[b] == 0)
6135 				continue;
6136 			mcadr[m] = ldp->dhw.dh_mcadr[b];
6137 			cfg_subtype.param.p[m] = IDN_CFGPARAM(IDNCFG_MCADR, b);
6138 			m++;
6139 		}
6140 		if (m > 0) {
6141 			if (phase == 6) {
6142 				PR_PROTO("%s:%d:%d: sending NWRSIZE (%d), "
6143 				    "MCADRs (0x%x, 0x%x)\n",
6144 				    proc, domid, phase,
6145 				    mcadr[0], mcadr[1], mcadr[2]);
6146 			} else {
6147 				PR_PROTO("%s:%d:%d: sending MCADRs "
6148 				    "(0x%x, 0x%x, 0x%x)\n",
6149 				    proc, domid, phase,
6150 				    mcadr[0], mcadr[1], mcadr[2]);
6151 			}
6152 			cfg_subtype.info.num = m;
6153 			cfg_subtype.info.phase = phase;
6154 			dp->dcfgphase = phase;
6155 
6156 			IDNXDC(domid, &mt, cfg_subtype.val,
6157 			    mcadr[0], mcadr[1], mcadr[2]);
6158 		} else {
6159 			rv = 1;
6160 		}
6161 		break;
6162 	}
6163 
6164 	return (rv);
6165 }
6166 
6167 /*
6168  * RETURNS:
6169  *	1	Unexpected/unnecessary phase.
6170  *	0	Successfully handled.
6171  */
6172 static int
idn_send_slave_config(int domid,int phase)6173 idn_send_slave_config(int domid, int phase)
6174 {
6175 	idn_cfgsubtype_t	cfg_subtype;
6176 	int		rv = 0;
6177 	idn_domain_t	*dp, *ldp;
6178 	smr_offset_t	mbox_domain;
6179 	idn_msgtype_t	mt;
6180 	int		mbox_index;
6181 	uint_t		cpus_u32, cpus_l32;
6182 	procname_t	proc = "idn_send_slave_config";
6183 
6184 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6185 	ASSERT(IDN_DLOCK_IS_SHARED(idn.localid));
6186 
6187 	mt.mt_mtype = IDNP_CFG;
6188 	mt.mt_atype = 0;
6189 	dp = &idn_domain[domid];
6190 	ldp = &idn_domain[idn.localid];
6191 
6192 	ASSERT(dp->dstate == IDNDS_CONFIG);
6193 	ASSERT(ldp->dvote.v.master == 0);
6194 
6195 	switch (phase) {
6196 
6197 	case 1:
6198 		mbox_index = IDN_NIL_DOMID;
6199 		mbox_domain = IDN_NIL_SMROFFSET;
6200 		(void) idn_get_mbox_config(domid, &mbox_index, NULL,
6201 		    &mbox_domain);
6202 		/*
6203 		 * ----------------------------------------------------
6204 		 * Send: DATAMBOX.DOMAIN or DATAMBOX.INDEX,
6205 		 *	 DATASVR.MAXNETS, DATASVR.MBXPERNET
6206 		 * ----------------------------------------------------
6207 		 */
6208 		cfg_subtype.val = 0;
6209 		if (mbox_index == IDN_NIL_DOMID) {
6210 			ASSERT(mbox_domain != IDN_NIL_SMROFFSET);
6211 			cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6212 			    IDNCFGARG_DATAMBOX_DOMAIN);
6213 		} else {
6214 			/*
6215 			 * Should only be sending Index to
6216 			 * the master and not another slave.
6217 			 */
6218 			ASSERT(dp->dvote.v.master);
6219 			ASSERT(mbox_domain == IDN_NIL_SMROFFSET);
6220 			cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_DATAMBOX,
6221 			    IDNCFGARG_DATAMBOX_INDEX);
6222 		}
6223 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_DATASVR,
6224 		    IDNCFGARG_DATASVR_MAXNETS);
6225 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_DATASVR,
6226 		    IDNCFGARG_DATASVR_MBXPERNET);
6227 		cfg_subtype.info.num = 3;
6228 		cfg_subtype.info.phase = phase;
6229 		dp->dcfgphase = phase;
6230 
6231 		PR_PROTO("%s:%d:%d: sending DATAMBOX.%s (0x%x), "
6232 		    "MAXNETS (%d), MBXPERNET (%d)\n",
6233 		    proc, domid, phase,
6234 		    (IDN_CFGPARAM_ARG(cfg_subtype.param.p[0])
6235 		    == IDNCFGARG_DATAMBOX_INDEX) ? "INDEX" : "DOMAIN",
6236 		    (mbox_index == IDN_NIL_DOMID) ? mbox_domain : mbox_index,
6237 		    IDN_MAX_NETS, IDN_MBOX_PER_NET);
6238 
6239 		IDNXDC(domid, &mt, cfg_subtype.val,
6240 		    ((mbox_index == IDN_NIL_DOMID) ? mbox_domain : mbox_index),
6241 		    IDN_MAX_NETS, IDN_MBOX_PER_NET);
6242 		break;
6243 
6244 	case 2:
6245 		cpus_u32 = UPPER32_CPUMASK(ldp->dcpuset);
6246 		cpus_l32 = LOWER32_CPUMASK(ldp->dcpuset);
6247 		/*
6248 		 * ----------------------------------------------------
6249 		 * Send: NETID, CPUSET
6250 		 * ----------------------------------------------------
6251 		 */
6252 		cfg_subtype.val = 0;
6253 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_NETID, 0);
6254 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_CPUSET,
6255 		    IDNCFGARG_CPUSET_UPPER);
6256 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_CPUSET,
6257 		    IDNCFGARG_CPUSET_LOWER);
6258 		cfg_subtype.info.num = 3;
6259 		cfg_subtype.info.phase = phase;
6260 		dp->dcfgphase = phase;
6261 
6262 		PR_PROTO("%s:%d:%d: sending NETID (%d), "
6263 		    "CPUSET (0x%x.%x)\n", proc, domid, phase,
6264 		    ldp->dnetid, cpus_u32, cpus_l32);
6265 
6266 		IDNXDC(domid, &mt, cfg_subtype.val,
6267 		    (uint_t)ldp->dnetid, cpus_u32, cpus_l32);
6268 		break;
6269 
6270 	case 3:
6271 		/*
6272 		 * ----------------------------------------------------
6273 		 * Send: BOARDSET, MTU, BUFSIZE
6274 		 * ----------------------------------------------------
6275 		 */
6276 		cfg_subtype.val = 0;
6277 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_BOARDSET, 0);
6278 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_SIZE,
6279 		    IDNCFGARG_SIZE_MTU);
6280 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6281 		    IDNCFGARG_SIZE_BUF);
6282 		cfg_subtype.info.num = 3;
6283 		cfg_subtype.info.phase = phase;
6284 		dp->dcfgphase = phase;
6285 
6286 		PR_PROTO("%s:%d:%d: sending BOARDSET (0x%x), MTU (0x%lx), "
6287 		    "BUFSIZE (0x%x)\n",
6288 		    proc, domid, phase, ldp->dhw.dh_boardset, IDN_MTU,
6289 		    IDN_SMR_BUFSIZE);
6290 
6291 		IDNXDC(domid, &mt, cfg_subtype.val,
6292 		    ldp->dhw.dh_boardset, IDN_MTU, IDN_SMR_BUFSIZE);
6293 		break;
6294 
6295 	case 4:
6296 		/*
6297 		 * ----------------------------------------------------
6298 		 * Send: SLABSIZE, OPTIONS.CHECKSUM, NWR_SIZE
6299 		 * ----------------------------------------------------
6300 		 */
6301 		cfg_subtype.val = 0;
6302 		cfg_subtype.param.p[0] = IDN_CFGPARAM(IDNCFG_SIZE,
6303 		    IDNCFGARG_SIZE_SLAB);
6304 		cfg_subtype.param.p[1] = IDN_CFGPARAM(IDNCFG_OPTIONS,
6305 		    IDNCFGARG_CHECKSUM);
6306 		cfg_subtype.param.p[2] = IDN_CFGPARAM(IDNCFG_SIZE,
6307 		    IDNCFGARG_SIZE_NWR);
6308 		cfg_subtype.info.num = 3;
6309 		cfg_subtype.info.phase = phase;
6310 		dp->dcfgphase = phase;
6311 
6312 		PR_PROTO("%s:%d:%d: sending SLABSIZE (%d), CKSUM (%d), "
6313 		    "NWRSIZE (%d)\n",
6314 		    proc, domid, phase, IDN_SLAB_BUFCOUNT,
6315 		    IDN_CHECKSUM, IDN_NWR_SIZE);
6316 
6317 		IDNXDC(domid, &mt, cfg_subtype.val,
6318 		    IDN_SLAB_BUFCOUNT, IDN_CHECKSUM, IDN_NWR_SIZE);
6319 		break;
6320 
6321 	default:
6322 		rv = 1;
6323 		break;
6324 	}
6325 
6326 	return (rv);
6327 }
6328 
6329 #define	CFG_FATAL	((uint_t)-1)	/* reset link */
6330 #define	CFG_CONTINUE	0x0000		/* looking for more */
6331 #define	CFG_DONE	0x0001		/* got everything expected */
6332 #define	CFG_ERR_MTU	0x0002
6333 #define	CFG_ERR_BUF	0x0004
6334 #define	CFG_ERR_SLAB	0x0008
6335 #define	CFG_ERR_NWR	0x0010
6336 #define	CFG_ERR_NETS	0x0020
6337 #define	CFG_ERR_MBOX	0x0040
6338 #define	CFG_ERR_NMCADR	0x0080
6339 #define	CFG_ERR_MCADR	0x0100
6340 #define	CFG_ERR_CKSUM	0x0200
6341 #define	CFG_ERR_SMR	0x0400
6342 #define	CFG_MAX_ERRORS	16
6343 
6344 #define	CFGERR2IDNKERR(ce) \
6345 	(((ce) & CFG_ERR_MTU)	? IDNKERR_CONFIG_MTU 	: \
6346 	((ce) & CFG_ERR_BUF)	? IDNKERR_CONFIG_BUF 	: \
6347 	((ce) & CFG_ERR_SLAB)	? IDNKERR_CONFIG_SLAB 	: \
6348 	((ce) & CFG_ERR_NWR)	? IDNKERR_CONFIG_NWR 	: \
6349 	((ce) & CFG_ERR_NETS)	? IDNKERR_CONFIG_NETS 	: \
6350 	((ce) & CFG_ERR_MBOX)	? IDNKERR_CONFIG_MBOX 	: \
6351 	((ce) & CFG_ERR_NMCADR)	? IDNKERR_CONFIG_NMCADR	: \
6352 	((ce) & CFG_ERR_MCADR)	? IDNKERR_CONFIG_MCADR	: \
6353 	((ce) & CFG_ERR_CKSUM)	? IDNKERR_CONFIG_CKSUM	: \
6354 	((ce) & CFG_ERR_SMR)	? IDNKERR_CONFIG_SMR	: 0)
6355 
6356 #define	CFGERR2FINARG(ce) \
6357 	(((ce) & CFG_ERR_MTU)	? IDNFIN_ARG_CFGERR_MTU    : \
6358 	((ce) & CFG_ERR_BUF)	? IDNFIN_ARG_CFGERR_BUF    : \
6359 	((ce) & CFG_ERR_SLAB)	? IDNFIN_ARG_CFGERR_SLAB   : \
6360 	((ce) & CFG_ERR_NWR)	? IDNFIN_ARG_CFGERR_NWR    : \
6361 	((ce) & CFG_ERR_NETS)	? IDNFIN_ARG_CFGERR_NETS   : \
6362 	((ce) & CFG_ERR_MBOX)	? IDNFIN_ARG_CFGERR_MBOX   : \
6363 	((ce) & CFG_ERR_NMCADR)	? IDNFIN_ARG_CFGERR_NMCADR : \
6364 	((ce) & CFG_ERR_MCADR)	? IDNFIN_ARG_CFGERR_MCADR  : \
6365 	((ce) & CFG_ERR_CKSUM)	? IDNFIN_ARG_CFGERR_CKSUM  : \
6366 	((ce) & CFG_ERR_SMR)	? IDNFIN_ARG_CFGERR_SMR	   : IDNFIN_ARG_NONE)
6367 
6368 /*
6369  * Called when some CFG messages arrive.  We use dncfgitems to count the
6370  * total number of items received so far since we'll receive multiple CFG
6371  * messages during the CONFIG phase.  Note that dncfgitems is initialized
6372  * in idn_send_config.
6373  */
6374 static void
idn_recv_config(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)6375 idn_recv_config(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
6376 {
6377 	uint_t		msg = mtp->mt_mtype;
6378 	uint_t		rv, rv_expected, rv_actual;
6379 	int		pnum;
6380 	int		phase;
6381 	register int	p;
6382 	register int	c;
6383 	idn_mainmbox_t	*mmp;
6384 	register uint_t	subtype, subtype_arg;
6385 	idn_domain_t	*dp;
6386 	int		index;
6387 	idn_domain_t	*ldp = &idn_domain[idn.localid];
6388 	idn_mboxtbl_t	*mbtp;
6389 	idn_cfgsubtype_t	cfg_subtype;
6390 	idn_xdcargs_t	cfg_arg;
6391 	idn_msgtype_t	mt;
6392 	idnsb_error_t	idnerr;
6393 	procname_t	proc = "idn_recv_config";
6394 
6395 	ASSERT(domid != idn.localid);
6396 
6397 	GET_XARGS(xargs, &cfg_subtype.val, &cfg_arg[0], &cfg_arg[1],
6398 	    &cfg_arg[2]);
6399 	cfg_arg[3] = 0;
6400 
6401 	dp = &idn_domain[domid];
6402 
6403 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6404 
6405 	if (dp->dstate != IDNDS_CONFIG) {
6406 		/*
6407 		 * Not ready to receive config info.
6408 		 * Drop whatever he sent us.  Let the
6409 		 * timer continue and timeout if needed.
6410 		 */
6411 		PR_PROTO("%s:%d: WARNING state(%s) != CONFIG\n",
6412 		    proc, domid, idnds_str[dp->dstate]);
6413 		return;
6414 	}
6415 
6416 	if ((msg & IDNP_ACKNACK_MASK) || dp->dcfgsnddone) {
6417 		IDN_MSGTIMER_STOP(domid, IDNP_CFG, 0);
6418 	}
6419 
6420 	if (msg & IDNP_ACKNACK_MASK) {
6421 		/*
6422 		 * ack/cfg
6423 		 */
6424 		phase = GET_XARGS_CFG_PHASE(xargs);
6425 
6426 		PR_PROTO("%s:%d: received ACK for CFG phase %d\n",
6427 		    proc, domid, phase);
6428 		if (phase != (int)dp->dcfgphase) {
6429 			/*
6430 			 * Phase is not what we were
6431 			 * expecting.  Something got lost
6432 			 * in the shuffle.  Restart the
6433 			 * timer and let it timeout if necessary
6434 			 * and reestablish the connection.
6435 			 */
6436 			IDN_MSGTIMER_START(domid, IDNP_CFG, dp->dcfgphase,
6437 			    idn_msg_waittime[IDNP_CFG], NULL);
6438 		} else {
6439 			idn_send_config(domid, phase + 1);
6440 
6441 			if (dp->dcfgsnddone && dp->dcfgrcvdone) {
6442 				IDN_DUNLOCK(domid);
6443 				IDN_SYNC_LOCK();
6444 				IDN_DLOCK_EXCL(domid);
6445 				if (dp->dstate == IDNDS_CONFIG) {
6446 					dp->dxp = &xphase_con;
6447 					IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
6448 					bzero(xargs, sizeof (xargs));
6449 
6450 					(void) idn_xphase_transition(domid,
6451 					    NULL, xargs);
6452 				}
6453 				IDN_SYNC_UNLOCK();
6454 			}
6455 		}
6456 		return;
6457 	}
6458 
6459 	pnum = (int)cfg_subtype.info.num;
6460 	phase = (int)cfg_subtype.info.phase;
6461 
6462 	for (p = 0; p < pnum; p++) {
6463 		int	board;
6464 #ifdef DEBUG
6465 		uint_t	val;
6466 		char	*str;
6467 
6468 		val = 0;
6469 		str = NULL;
6470 #define	RCVCFG(s, v)	{ str = (s); val = (v); }
6471 #else
6472 #define	RCVCFG(s, v)	{}
6473 #endif /* DEBUG */
6474 
6475 		subtype	    = IDN_CFGPARAM_TYPE(cfg_subtype.param.p[p]);
6476 		subtype_arg = IDN_CFGPARAM_ARG(cfg_subtype.param.p[p]);
6477 
6478 		switch (subtype) {
6479 
6480 		case IDNCFG_BARLAR:
6481 			IDN_GLOCK_EXCL();
6482 			switch (subtype_arg) {
6483 
6484 			case IDNCFGARG_BARLAR_BAR:
6485 				if (idn.smr.rempfn == PFN_INVALID) {
6486 					idn.smr.rempfn = (pfn_t)cfg_arg[p];
6487 					dp->dncfgitems++;
6488 					RCVCFG("BARLAR_BAR", cfg_arg[p]);
6489 				}
6490 				break;
6491 
6492 			case IDNCFGARG_BARLAR_LAR:
6493 				if (idn.smr.rempfnlim == PFN_INVALID) {
6494 					idn.smr.rempfnlim = (pfn_t)cfg_arg[p];
6495 					dp->dncfgitems++;
6496 					RCVCFG("BARLAR_LAR", cfg_arg[p]);
6497 				}
6498 				break;
6499 
6500 			default:
6501 				cmn_err(CE_WARN,
6502 				    "IDN 217: unknown CFGARG type (%d) "
6503 				    "from domain %d",
6504 				    subtype_arg, domid);
6505 				break;
6506 			}
6507 			IDN_GUNLOCK();
6508 			break;
6509 
6510 		case IDNCFG_MCADR:
6511 			board = subtype_arg;
6512 			if ((board >= 0) && (board < MAX_BOARDS) &&
6513 			    (dp->dhw.dh_mcadr[board] == 0)) {
6514 				dp->dhw.dh_mcadr[board] = cfg_arg[p];
6515 				dp->dncfgitems++;
6516 				RCVCFG("MCADR", cfg_arg[p]);
6517 			}
6518 			break;
6519 
6520 		case IDNCFG_NMCADR:
6521 			if (dp->dhw.dh_nmcadr == 0) {
6522 				dp->dhw.dh_nmcadr = cfg_arg[p];
6523 				dp->dncfgitems++;
6524 				RCVCFG("NMCADR", cfg_arg[p]);
6525 			}
6526 			break;
6527 
6528 		case IDNCFG_CPUSET:
6529 			switch (subtype_arg) {
6530 
6531 			case IDNCFGARG_CPUSET_UPPER:
6532 			{
6533 				cpuset_t	tmpset;
6534 
6535 				MAKE64_CPUMASK(tmpset, cfg_arg[p], 0);
6536 				CPUSET_OR(dp->dcpuset, tmpset);
6537 				dp->dncfgitems++;
6538 				RCVCFG("CPUSET_UPPER", cfg_arg[p]);
6539 				break;
6540 			}
6541 			case IDNCFGARG_CPUSET_LOWER:
6542 			{
6543 				cpuset_t	tmpset;
6544 
6545 				MAKE64_CPUMASK(tmpset, 0, cfg_arg[p]);
6546 				CPUSET_OR(dp->dcpuset, tmpset);
6547 				dp->dncfgitems++;
6548 				RCVCFG("CPUSET_LOWER", cfg_arg[p]);
6549 				break;
6550 			}
6551 			default:
6552 				ASSERT(0);
6553 				break;
6554 			}
6555 			break;
6556 
6557 		case IDNCFG_NETID:
6558 			if (dp->dnetid == (ushort_t)-1) {
6559 				dp->dnetid = (ushort_t)cfg_arg[p];
6560 				dp->dncfgitems++;
6561 				RCVCFG("NETID", cfg_arg[p]);
6562 			}
6563 			break;
6564 
6565 		case IDNCFG_BOARDSET:
6566 			if ((dp->dhw.dh_boardset & cfg_arg[p])
6567 			    == dp->dhw.dh_boardset) {
6568 				/*
6569 				 * Boardset better include what we
6570 				 * already know about.
6571 				 */
6572 				dp->dhw.dh_boardset = cfg_arg[p];
6573 				dp->dncfgitems++;
6574 				RCVCFG("BOARDSET", cfg_arg[p]);
6575 			}
6576 			break;
6577 
6578 		case IDNCFG_SIZE:
6579 			switch (subtype_arg) {
6580 
6581 			case IDNCFGARG_SIZE_MTU:
6582 				if (dp->dmtu == 0) {
6583 					dp->dmtu = cfg_arg[p];
6584 					dp->dncfgitems++;
6585 					RCVCFG("MTU", cfg_arg[p]);
6586 				}
6587 				break;
6588 
6589 			case IDNCFGARG_SIZE_BUF:
6590 				if (dp->dbufsize == 0) {
6591 					dp->dbufsize = cfg_arg[p];
6592 					dp->dncfgitems++;
6593 					RCVCFG("BUFSIZE", cfg_arg[p]);
6594 				}
6595 				break;
6596 
6597 			case IDNCFGARG_SIZE_SLAB:
6598 				if (dp->dslabsize == 0) {
6599 					dp->dslabsize = (short)cfg_arg[p];
6600 					dp->dncfgitems++;
6601 					RCVCFG("SLABSIZE", cfg_arg[p]);
6602 				}
6603 				break;
6604 
6605 			case IDNCFGARG_SIZE_NWR:
6606 				if (dp->dnwrsize == 0) {
6607 					dp->dnwrsize = (short)cfg_arg[p];
6608 					dp->dncfgitems++;
6609 					RCVCFG("NWRSIZE", cfg_arg[p]);
6610 				}
6611 				break;
6612 
6613 			default:
6614 				ASSERT(0);
6615 				break;
6616 			}
6617 			break;
6618 
6619 		case IDNCFG_DATAMBOX:
6620 			switch (subtype_arg) {
6621 
6622 			case IDNCFGARG_DATAMBOX_TABLE:
6623 				if (ldp->dmbox.m_tbl ||
6624 				    !dp->dvote.v.master ||
6625 				    !VALID_NWROFFSET(cfg_arg[p], 4)) {
6626 					/*
6627 					 * Only a master should be
6628 					 * sending us a datambox table.
6629 					 */
6630 					break;
6631 				}
6632 				IDN_DLOCK_EXCL(idn.localid);
6633 				ldp->dmbox.m_tbl = (idn_mboxtbl_t *)
6634 				    IDN_OFFSET2ADDR(cfg_arg[p]);
6635 				IDN_DUNLOCK(idn.localid);
6636 				dp->dncfgitems++;
6637 				RCVCFG("DATAMBOX.TABLE", cfg_arg[p]);
6638 				break;
6639 
6640 			case IDNCFGARG_DATAMBOX_DOMAIN:
6641 				if (dp->dmbox.m_send->mm_smr_mboxp ||
6642 				    !VALID_NWROFFSET(cfg_arg[p], 4))
6643 					break;
6644 				mbtp = (idn_mboxtbl_t *)
6645 				    IDN_OFFSET2ADDR(cfg_arg[p]);
6646 				mmp = dp->dmbox.m_send;
6647 				for (c = 0; c < IDN_MAX_NETS; c++) {
6648 
6649 					mutex_enter(&mmp[c].mm_mutex);
6650 					mmp[c].mm_smr_mboxp = mbtp;
6651 					mutex_exit(&mmp[c].mm_mutex);
6652 
6653 					IDN_MBOXTBL_PTR_INC(mbtp);
6654 				}
6655 				if (c <= 0)
6656 					break;
6657 				dp->dncfgitems++;
6658 				RCVCFG("DATAMBOX.DOMAIN", cfg_arg[p]);
6659 				break;
6660 
6661 			case IDNCFGARG_DATAMBOX_INDEX:
6662 				if (!ldp->dvote.v.master ||
6663 				    dp->dmbox.m_send->mm_smr_mboxp) {
6664 					/*
6665 					 * If I'm not the master then
6666 					 * I can't handle processing a
6667 					 * mailbox index.
6668 					 * OR, if I already have the send
6669 					 * mailbox, I'm done with this
6670 					 * config item.
6671 					 */
6672 					break;
6673 				}
6674 				ASSERT(dp->dmbox.m_tbl);
6675 				index = (int)cfg_arg[p];
6676 				/*
6677 				 * The given index is the local domain's
6678 				 * index into the remote domain's mailbox
6679 				 * table that contains the mailbox that
6680 				 * remote domain wants the local domain to
6681 				 * use as the send mailbox for messages
6682 				 * destined for the remote domain.
6683 				 * I.e. from the remote domain's
6684 				 *	perspective, this is his receive
6685 				 *	mailbox.
6686 				 */
6687 				mbtp = IDN_MBOXTBL_PTR(dp->dmbox.m_tbl, index);
6688 				mmp = dp->dmbox.m_send;
6689 				for (c = 0; c < IDN_MAX_NETS; c++) {
6690 
6691 					mutex_enter(&mmp[c].mm_mutex);
6692 					mmp[c].mm_smr_mboxp = mbtp;
6693 					mutex_exit(&mmp[c].mm_mutex);
6694 
6695 					IDN_MBOXTBL_PTR_INC(mbtp);
6696 				}
6697 				if (c <= 0)
6698 					break;
6699 				dp->dncfgitems++;
6700 				RCVCFG("DATAMBOX.INDEX", cfg_arg[p]);
6701 				break;
6702 
6703 			default:
6704 				ASSERT(0);
6705 				break;
6706 			}
6707 			break;
6708 
6709 		case IDNCFG_DATASVR:
6710 			switch (subtype_arg) {
6711 
6712 			case IDNCFGARG_DATASVR_MAXNETS:
6713 				if (dp->dmaxnets)
6714 					break;
6715 				dp->dmaxnets = (uint_t)(cfg_arg[p] & 0x3f);
6716 				dp->dncfgitems++;
6717 				RCVCFG("DATASVR.MAXNETS", cfg_arg[p]);
6718 				break;
6719 
6720 			case IDNCFGARG_DATASVR_MBXPERNET:
6721 				if (dp->dmboxpernet)
6722 					break;
6723 				dp->dmboxpernet = (uint_t)(cfg_arg[p] & 0x1ff);
6724 				dp->dncfgitems++;
6725 				RCVCFG("DATASVR.MBXPERNET", cfg_arg[p]);
6726 				break;
6727 
6728 			default:
6729 				ASSERT(0);
6730 				break;
6731 			}
6732 			break;
6733 
6734 		case IDNCFG_OPTIONS:
6735 			switch (subtype_arg) {
6736 
6737 			case IDNCFGARG_CHECKSUM:
6738 				if (dp->dcksum)
6739 					break;
6740 				if ((cfg_arg[p] & 0xff) == 0)
6741 					dp->dcksum = 1;		/* off */
6742 				else
6743 					dp->dcksum = 2;		/* on */
6744 				dp->dncfgitems++;
6745 				RCVCFG("OPTIONS.CHECKSUM", cfg_arg[p]);
6746 				break;
6747 
6748 			default:
6749 				ASSERT(0);
6750 				break;
6751 			}
6752 
6753 		default:
6754 			break;
6755 		}
6756 #ifdef DEBUG
6757 		PR_PROTO("%s:%d: received %s (0x%x)\n",
6758 		    proc, domid, str ? str : "<empty>", val);
6759 #endif /* DEBUG */
6760 	}
6761 
6762 	mt.mt_mtype = IDNP_ACK;
6763 	mt.mt_atype = IDNP_CFG;
6764 	mt.mt_cookie = mtp->mt_cookie;
6765 	CLR_XARGS(cfg_arg);
6766 	SET_XARGS_CFG_PHASE(cfg_arg, phase);
6767 	idn_send_acknack(domid, &mt, cfg_arg);
6768 
6769 	rv_expected = rv_actual = 0;
6770 
6771 	if (dp->dvote.v.master == 0) {
6772 		/*
6773 		 * Remote domain is a slave, check if we've received
6774 		 * all that we were expecting, and if so transition to
6775 		 * the next state.
6776 		 */
6777 		rv = idn_check_slave_config(domid, &rv_expected, &rv_actual);
6778 	} else {
6779 		/*
6780 		 * Remote domain is a master, check if this slave has
6781 		 * received all that it was expecting, and if so
6782 		 * transition to the next state.
6783 		 */
6784 		rv = idn_check_master_config(domid, &rv_expected, &rv_actual);
6785 	}
6786 
6787 	switch (rv) {
6788 	case CFG_DONE:
6789 		/*
6790 		 * All config info received that was expected, wrap up.
6791 		 */
6792 		if (!idn_recv_config_done(domid) && dp->dvote.v.master) {
6793 			IDN_DLOCK_EXCL(idn.localid);
6794 			ldp->dvote.v.connected = 1;
6795 			IDN_DUNLOCK(idn.localid);
6796 		}
6797 		break;
6798 
6799 	case CFG_CONTINUE:
6800 		/*
6801 		 * If we're not done sending our own config, then
6802 		 * there's no need to set a timer since one will
6803 		 * automatically be set when we send a config
6804 		 * message waiting for an acknowledgement.
6805 		 */
6806 		if (dp->dcfgsnddone) {
6807 			/*
6808 			 * We haven't yet received all the config
6809 			 * information we were expecting.  Need to
6810 			 * restart CFG timer if we've sent everything..
6811 			 */
6812 			IDN_MSGTIMER_START(domid, IDNP_CFG, 0,
6813 			    idn_msg_waittime[IDNP_CFG], NULL);
6814 		}
6815 		break;
6816 
6817 	case CFG_FATAL:
6818 		/*
6819 		 * Fatal error occurred during config exchange.
6820 		 * We need to shutdown connection in this
6821 		 * case, so initiate a (non-relink) FIN.
6822 		 * so let's get the show on the road.
6823 		 */
6824 		IDN_DUNLOCK(domid);
6825 		IDN_SYNC_LOCK();
6826 		IDN_DLOCK_EXCL(domid);
6827 		/*
6828 		 * If the state has changed from CONFIG
6829 		 * then somebody else has taken over
6830 		 * control of this domain so we can just
6831 		 * bail out.
6832 		 */
6833 		if (dp->dstate == IDNDS_CONFIG) {
6834 			INIT_IDNKERR(&idnerr);
6835 			SET_IDNKERR_ERRNO(&idnerr, EPROTO);
6836 			SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_FATAL);
6837 			SET_IDNKERR_PARAM0(&idnerr, domid);
6838 			idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
6839 			/*
6840 			 * Keep this guy around so we can try again.
6841 			 */
6842 			DOMAINSET_ADD(idn.domset.ds_relink, domid);
6843 			IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
6844 			    idn.domset.ds_relink);
6845 			(void) idn_disconnect(domid, IDNFIN_NORMAL,
6846 			    IDNFIN_ARG_CFGERR_FATAL,
6847 			    IDNFIN_SYNC_NO);
6848 		}
6849 		IDN_SYNC_UNLOCK();
6850 		break;
6851 
6852 	default:	/* parameter conflict */
6853 		IDN_DUNLOCK(domid);
6854 		IDN_SYNC_LOCK();
6855 		IDN_DLOCK_EXCL(domid);
6856 		if (dp->dstate != IDNDS_CONFIG) {
6857 			/*
6858 			 * Hmmm...changed in the short period
6859 			 * we had dropped the lock, oh well.
6860 			 */
6861 			IDN_SYNC_UNLOCK();
6862 			break;
6863 		}
6864 		c = 0;
6865 		for (p = 0; p < CFG_MAX_ERRORS; p++)
6866 			if (rv & (1 << p))
6867 				c++;
6868 		INIT_IDNKERR(&idnerr);
6869 		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
6870 		SET_IDNKERR_PARAM0(&idnerr, domid);
6871 		if (c > 1) {
6872 			SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_MULTIPLE);
6873 			SET_IDNKERR_PARAM1(&idnerr, c);
6874 		} else {
6875 			SET_IDNKERR_IDNERR(&idnerr, CFGERR2IDNKERR(rv));
6876 			SET_IDNKERR_PARAM1(&idnerr, rv_expected);
6877 			SET_IDNKERR_PARAM2(&idnerr, rv_actual);
6878 		}
6879 		/*
6880 		 * Any parameter conflicts are grounds for dismissal.
6881 		 */
6882 		if (idn.domset.ds_connected == 0) {
6883 			domainset_t	domset;
6884 			/*
6885 			 * We have no other connections yet.
6886 			 * We must blow out of here completely
6887 			 * unless we have relinkers left from
6888 			 * a RECONFIG.
6889 			 */
6890 			IDN_GLOCK_EXCL();
6891 			domset = ~idn.domset.ds_relink;
6892 			if (idn.domset.ds_relink == 0) {
6893 				IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
6894 			}
6895 			domset &= ~idn.domset.ds_hitlist;
6896 			IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
6897 			IDN_GUNLOCK();
6898 			IDN_DUNLOCK(domid);
6899 
6900 			DOMAINSET_DEL(domset, idn.localid);
6901 
6902 			idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);
6903 
6904 			PR_HITLIST("%s:%d: unlink_domainset(%x) due to "
6905 			    "CFG error (relink=%x, hitlist=%x)\n",
6906 			    proc, domid, domset, idn.domset.ds_relink,
6907 			    idn.domset.ds_hitlist);
6908 
6909 			idn_unlink_domainset(domset, IDNFIN_NORMAL,
6910 			    CFGERR2FINARG(rv),
6911 			    IDNFIN_OPT_UNLINK,
6912 			    BOARDSET_ALL);
6913 			IDN_SYNC_UNLOCK();
6914 			IDN_DLOCK_EXCL(domid);
6915 		} else {
6916 			PR_HITLIST("%s:%d: idn_disconnect(%d) due to CFG "
6917 			    "error (conn=%x, relink=%x, hitlist=%x)\n",
6918 			    proc, domid, domid, idn.domset.ds_connected,
6919 			    idn.domset.ds_relink, idn.domset.ds_hitlist);
6920 			/*
6921 			 * If we have other connections then
6922 			 * we're only going to blow away this
6923 			 * single connection.
6924 			 */
6925 			idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
6926 
6927 			DOMAINSET_DEL(idn.domset.ds_relink, domid);
6928 			(void) idn_disconnect(domid, IDNFIN_NORMAL,
6929 			    CFGERR2FINARG(rv), IDNFIN_SYNC_NO);
6930 			IDN_SYNC_UNLOCK();
6931 		}
6932 		break;
6933 	}
6934 }
6935 
6936 /*
6937  * Called by master or slave which expects exactly the following
6938  * with respect to config info received from a SLAVE:
6939  * 	IDNCFG_CPUSET
6940  *	IDNCFG_NETID
6941  *	IDNCFG_BOARDSET
6942  *	IDNCFG_SIZE (MTU, BUF, SLAB, NWR)
6943  *	IDNCFG_DATAMBOX (DOMAIN or INDEX if caller is master)
6944  *	IDNCFG_DATASVR (MAXNETS, MBXPERNET)
6945  *	IDNCFG_OPTIONS (CHECKSUM)
6946  */
6947 static uint_t
idn_check_slave_config(int domid,uint_t * exp,uint_t * act)6948 idn_check_slave_config(int domid, uint_t *exp, uint_t *act)
6949 {
6950 	uint_t		rv = 0;
6951 	idn_domain_t	*ldp, *dp;
6952 	procname_t	proc = "idn_check_slave_config";
6953 
6954 	dp = &idn_domain[domid];
6955 	ldp = &idn_domain[idn.localid];
6956 
6957 	ASSERT(domid != idn.localid);
6958 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
6959 	ASSERT(dp->dstate == IDNDS_CONFIG);
6960 
6961 	PR_PROTO("%s:%d: number received %d, number expected %d\n",
6962 	    proc, domid, (int)dp->dncfgitems, IDN_SLAVE_NCFGITEMS);
6963 
6964 	if ((int)dp->dncfgitems < IDN_SLAVE_NCFGITEMS)
6965 		return (CFG_CONTINUE);
6966 
6967 	if ((dp->dnetid == (ushort_t)-1) ||
6968 	    CPUSET_ISNULL(dp->dcpuset) ||
6969 	    (dp->dhw.dh_boardset == 0) ||
6970 	    (dp->dmbox.m_send->mm_smr_mboxp == NULL) ||
6971 	    (dp->dmaxnets == 0) ||
6972 	    (dp->dmboxpernet == 0) ||
6973 	    (dp->dcksum == 0) ||
6974 	    (dp->dmtu == 0) ||
6975 	    (dp->dbufsize == 0) ||
6976 	    (dp->dslabsize == 0) ||
6977 	    (dp->dnwrsize == 0)) {
6978 		/*
6979 		 * We received our IDN_SLAVE_NCFGITEMS config items,
6980 		 * but not all what we were expecting!  Gotta nack and
6981 		 * close connection.
6982 		 */
6983 		cmn_err(CE_WARN,
6984 		    "IDN: 218: missing some required config items from "
6985 		    "domain %d", domid);
6986 
6987 		rv = CFG_FATAL;
6988 		goto done;
6989 	}
6990 
6991 	if (!valid_mtu(dp->dmtu)) {
6992 		cmn_err(CE_WARN,
6993 		    "IDN: 219: remote domain %d MTU (%d) invalid "
6994 		    "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
6995 
6996 		*exp = (uint_t)ldp->dmtu;
6997 		*act = (uint_t)dp->dmtu;
6998 		rv |= CFG_ERR_MTU;
6999 	}
7000 	if (!valid_bufsize(dp->dbufsize)) {
7001 		cmn_err(CE_WARN,
7002 		    "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
7003 		    "(local.bufsize = %d)", dp->domid, dp->dbufsize,
7004 		    ldp->dbufsize);
7005 
7006 		*exp = (uint_t)ldp->dbufsize;
7007 		*act = (uint_t)dp->dbufsize;
7008 		rv |= CFG_ERR_BUF;
7009 	}
7010 	if (!valid_slabsize((int)dp->dslabsize)) {
7011 		cmn_err(CE_WARN,
7012 		    "IDN: 221: remote domain %d SLABSIZE (%d) invalid "
7013 		    "(local.slabsize = %d)",
7014 		    dp->domid, dp->dslabsize, ldp->dslabsize);
7015 
7016 		*exp = (uint_t)ldp->dslabsize;
7017 		*act = (uint_t)dp->dslabsize;
7018 		rv |= CFG_ERR_SLAB;
7019 	}
7020 	if (!valid_nwrsize((int)dp->dnwrsize)) {
7021 		cmn_err(CE_WARN,
7022 		    "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
7023 		    "(local.nwrsize = %d)",
7024 		    dp->domid, dp->dnwrsize, ldp->dnwrsize);
7025 
7026 		*exp = (uint_t)ldp->dnwrsize;
7027 		*act = (uint_t)dp->dnwrsize;
7028 		rv |= CFG_ERR_NWR;
7029 	}
7030 	if ((int)dp->dmaxnets != IDN_MAX_NETS) {
7031 		cmn_err(CE_WARN,
7032 		    "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
7033 		    "(local.maxnets = %d)",
7034 		    dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
7035 
7036 		*exp = (uint_t)IDN_MAX_NETS;
7037 		*act = (uint_t)dp->dmaxnets;
7038 		rv |= CFG_ERR_NETS;
7039 	}
7040 	if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
7041 		cmn_err(CE_WARN,
7042 		    "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
7043 		    "invalid (local.mboxpernet = %d)",
7044 		    dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
7045 
7046 		*exp = (uint_t)IDN_MBOX_PER_NET;
7047 		*act = (uint_t)dp->dmboxpernet;
7048 		rv |= CFG_ERR_MBOX;
7049 	}
7050 	if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
7051 		cmn_err(CE_WARN,
7052 		    "IDN: 226: remote domain %d CHECKSUM flag (%d) "
7053 		    "mismatches local domain's (%d)",
7054 		    dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
7055 
7056 		*exp = (uint_t)IDN_CHECKSUM;
7057 		*act = (uint_t)(dp->dcksum - 1);
7058 		rv |= CFG_ERR_CKSUM;
7059 	}
7060 
7061 done:
7062 
7063 	return (rv ? rv : CFG_DONE);
7064 }
7065 
7066 /*
7067  * Called by slave ONLY which expects exactly the following
7068  * config info from the MASTER:
7069  *	IDNCFG_BARLAR
7070  *	IDNCFG_MCADR
7071  *	IDNCFG_NMCADR
7072  * 	IDNCFG_CPUSET
7073  *	IDNCFG_NETID
7074  *	IDNCFG_BOARDSET
7075  *	IDNCFG_SIZE (MTU, BUF, SLAB, NWR)
7076  *	IDNCFG_DATAMBOX (TABLE, DOMAIN)
7077  *	IDNCFG_DATASVR (MAXNETS, MBXPERNET)
7078  *	IDNCFG_OPTIONS (CHECKSUM)
7079  */
7080 static uint_t
idn_check_master_config(int domid,uint_t * exp,uint_t * act)7081 idn_check_master_config(int domid, uint_t *exp, uint_t *act)
7082 {
7083 	uint_t		rv = 0;
7084 	int		nmcadr;
7085 	int		total_expitems;
7086 	int		p, m, err;
7087 	idn_domain_t	*dp;
7088 	idn_domain_t	*ldp = &idn_domain[idn.localid];
7089 	procname_t	proc = "idn_check_master_config";
7090 
7091 	dp = &idn_domain[domid];
7092 
7093 	ASSERT(IDN_GET_MASTERID() != idn.localid);
7094 	ASSERT(domid != idn.localid);
7095 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7096 	ASSERT(dp->dstate == IDNDS_CONFIG);
7097 
7098 	PR_PROTO("%s:%d: number received %d, minimum number expected %d\n",
7099 	    proc, domid, (int)dp->dncfgitems, IDN_MASTER_NCFGITEMS);
7100 
7101 	if ((int)dp->dncfgitems < IDN_MASTER_NCFGITEMS)
7102 		return (CFG_CONTINUE);
7103 
7104 	/*
7105 	 * We have at least IDN_MASTER_NCFGITEMS items which
7106 	 * means we have at least one MCADR.  Need to make sure
7107 	 * we have all that we're expecting, NMCADR.
7108 	 */
7109 	total_expitems = IDN_MASTER_NCFGITEMS + dp->dhw.dh_nmcadr - 1;
7110 	if ((dp->dhw.dh_nmcadr == 0) ||
7111 	    ((int)dp->dncfgitems < total_expitems)) {
7112 		/*
7113 		 * We have not yet received all the MCADRs
7114 		 * we're expecting.
7115 		 */
7116 		PR_PROTO("%s:%d: haven't received all MCADRs yet.\n",
7117 		    proc, domid);
7118 		return (CFG_CONTINUE);
7119 	}
7120 
7121 	nmcadr = 0;
7122 	for (p = 0; p < MAX_BOARDS; p++)
7123 		if (dp->dhw.dh_mcadr[p] != 0)
7124 			nmcadr++;
7125 
7126 	IDN_GLOCK_SHARED();
7127 	if ((idn.smr.rempfn == PFN_INVALID) ||
7128 	    (idn.smr.rempfnlim == PFN_INVALID) ||
7129 	    (dp->dnetid == (ushort_t)-1) ||
7130 	    CPUSET_ISNULL(dp->dcpuset) ||
7131 	    (dp->dhw.dh_boardset == 0) ||
7132 	    (nmcadr != dp->dhw.dh_nmcadr) ||
7133 	    (dp->dmbox.m_send->mm_smr_mboxp == NULL) ||
7134 	    (ldp->dmbox.m_tbl == NULL) ||
7135 	    (dp->dmaxnets == 0) ||
7136 	    (dp->dmboxpernet == 0) ||
7137 	    (dp->dcksum == 0) ||
7138 	    (dp->dmtu == 0) ||
7139 	    (dp->dbufsize == 0) ||
7140 	    (dp->dnwrsize == 0)) {
7141 
7142 		IDN_GUNLOCK();
7143 		/*
7144 		 * We received all of our config items, but not
7145 		 * all what we were expecting!  Gotta reset and
7146 		 * close connection.
7147 		 */
7148 		cmn_err(CE_WARN,
7149 		    "IDN: 227: missing some required config items from "
7150 		    "domain %d", domid);
7151 
7152 		rv = CFG_FATAL;
7153 		goto done;
7154 	}
7155 	if ((idn.smr.rempfnlim - idn.smr.rempfn) > btop(MB2B(IDN_SMR_SIZE))) {
7156 		/*
7157 		 * The master's SMR region is larger than
7158 		 * mine!  This means that this domain may
7159 		 * receive I/O buffers which are out of the
7160 		 * range of this local domain's SMR virtual
7161 		 * address space.  The master SMR has to be
7162 		 * no larger than the local SMR in order to
7163 		 * guarantee enough local virtual addresses
7164 		 * to see all of the SMR space.
7165 		 * XXX - Possibly add negotiating SMR size.
7166 		 *	 Try to create a new virtual mapping.
7167 		 *	 Could let domains negotiate SMR size.
7168 		 *	 Winning size would have to be smallest
7169 		 *	 in DC.  If so, how to handle incoming
7170 		 *	 domains with even smaller SMRs?
7171 		 *	 - Could either disallow connection
7172 		 *	 - Could reconfigure to use smaller SMR.
7173 		 */
7174 		cmn_err(CE_WARN,
7175 		    "IDN: 228: master's SMR (%ld) larger than "
7176 		    "local's SMR (%ld)",
7177 		    idn.smr.rempfnlim - idn.smr.rempfn,
7178 		    btop(MB2B(IDN_SMR_SIZE)));
7179 
7180 		*exp = (uint_t)IDN_SMR_SIZE;
7181 		*act = (uint_t)B2MB(ptob(idn.smr.rempfnlim - idn.smr.rempfn));
7182 		rv |= CFG_ERR_SMR;
7183 	}
7184 	IDN_GUNLOCK();
7185 
7186 	if (!valid_mtu(dp->dmtu)) {
7187 		cmn_err(CE_WARN,
7188 		    "IDN: 219: remote domain %d MTU (%d) invalid "
7189 		    "(local.mtu = %d)", dp->domid, dp->dmtu, ldp->dmtu);
7190 
7191 		*exp = (uint_t)ldp->dmtu;
7192 		*act = (uint_t)dp->dmtu;
7193 		rv |= CFG_ERR_MTU;
7194 	}
7195 	if (!valid_bufsize(dp->dbufsize)) {
7196 		cmn_err(CE_WARN,
7197 		    "IDN: 220: remote domain %d BUFSIZE (%d) invalid "
7198 		    "(local.bufsize = %d)", dp->domid, dp->dbufsize,
7199 		    ldp->dbufsize);
7200 
7201 		*exp = (uint_t)ldp->dbufsize;
7202 		*act = (uint_t)dp->dbufsize;
7203 		rv |= CFG_ERR_BUF;
7204 	}
7205 	if (!valid_nwrsize((int)dp->dnwrsize)) {
7206 		cmn_err(CE_WARN,
7207 		    "IDN: 223: remote domain %d NWRSIZE (%d) invalid "
7208 		    "(local.nwrsize = %d)",
7209 		    dp->domid, dp->dnwrsize, ldp->dnwrsize);
7210 
7211 		*exp = (uint_t)ldp->dnwrsize;
7212 		*act = (uint_t)dp->dnwrsize;
7213 		rv |= CFG_ERR_NWR;
7214 	}
7215 	if ((int)dp->dmaxnets != IDN_MAX_NETS) {
7216 		cmn_err(CE_WARN,
7217 		    "IDN: 224: remote domain %d MAX_NETS (%d) invalid "
7218 		    "(local.maxnets = %d)",
7219 		    dp->domid, (int)dp->dmaxnets, IDN_MAX_NETS);
7220 
7221 		*exp = (uint_t)IDN_MAX_NETS;
7222 		*act = (uint_t)dp->dmaxnets;
7223 		rv |= CFG_ERR_NETS;
7224 	}
7225 	if ((int)dp->dmboxpernet != IDN_MBOX_PER_NET) {
7226 		cmn_err(CE_WARN,
7227 		    "IDN: 225: remote domain %d MBOX_PER_NET (%d) "
7228 		    "invalid (local.mboxpernet = %d)",
7229 		    dp->domid, (int)dp->dmboxpernet, IDN_MBOX_PER_NET);
7230 
7231 		*exp = (uint_t)IDN_MBOX_PER_NET;
7232 		*act = (uint_t)dp->dmboxpernet;
7233 		rv |= CFG_ERR_MBOX;
7234 	}
7235 	if ((dp->dcksum - 1) != (uchar_t)IDN_CHECKSUM) {
7236 		cmn_err(CE_WARN,
7237 		    "IDN: 226: remote domain %d CHECKSUM flag (%d) "
7238 		    "mismatches local domain's (%d)",
7239 		    dp->domid, (int)dp->dcksum - 1, IDN_CHECKSUM);
7240 
7241 		*exp = (uint_t)IDN_CHECKSUM;
7242 		*act = (uint_t)(dp->dcksum - 1);
7243 		rv |= CFG_ERR_CKSUM;
7244 	}
7245 	nmcadr = 0;
7246 	err = 0;
7247 	for (m = 0; m < MAX_BOARDS; m++) {
7248 		if (!BOARD_IN_SET(dp->dhw.dh_boardset, m) &&
7249 		    dp->dhw.dh_mcadr[m]) {
7250 			cmn_err(CE_WARN,
7251 			    "IDN: 229: remote domain %d boardset (0x%x) "
7252 			    "conflicts with MCADR(board %d) [0x%x]",
7253 			    dp->domid, (uint_t)dp->dhw.dh_boardset, m,
7254 			    dp->dhw.dh_mcadr[m]);
7255 			err++;
7256 		}
7257 		if (dp->dhw.dh_mcadr[m])
7258 			nmcadr++;
7259 	}
7260 	if (err) {
7261 		*exp = 0;
7262 		*act = err;
7263 		rv |= CFG_ERR_MCADR;
7264 	} else if (nmcadr != dp->dhw.dh_nmcadr) {
7265 		cmn_err(CE_WARN,
7266 		    "IDN: 230: remote domain %d reported number of "
7267 		    "MCADRs (%d) mismatches received (%d)",
7268 		    dp->domid, dp->dhw.dh_nmcadr, nmcadr);
7269 		*exp = (uint_t)dp->dhw.dh_nmcadr;
7270 		*act = (uint_t)nmcadr;
7271 		rv |= CFG_ERR_NMCADR;
7272 	}
7273 
7274 done:
7275 
7276 	return (rv ? rv : CFG_DONE);
7277 }
7278 
7279 static int
idn_recv_config_done(int domid)7280 idn_recv_config_done(int domid)
7281 {
7282 	boardset_t		b_conflicts;
7283 	cpuset_t		p_conflicts;
7284 	register int		p, i;
7285 	register idn_domain_t	*dp;
7286 	idnsb_error_t		idnerr;
7287 	procname_t		proc = "idn_recv_config_done";
7288 
7289 	ASSERT(domid != IDN_NIL_DOMID);
7290 	dp = &idn_domain[domid];
7291 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7292 
7293 	/*
7294 	 * Well, we received all that we were expecting
7295 	 * so stop any CFG timers we had going.
7296 	 */
7297 	IDN_MSGTIMER_STOP(domid, IDNP_CFG, 0);
7298 
7299 	dp->dncpus = 0;
7300 	for (p = 0; p < NCPU; p++)
7301 		if (CPU_IN_SET(dp->dcpuset, p))
7302 			dp->dncpus++;
7303 	dp->dhw.dh_nboards = 0;
7304 	for (p = 0; p < MAX_BOARDS; p++)
7305 		if (BOARD_IN_SET(dp->dhw.dh_boardset, p))
7306 			dp->dhw.dh_nboards++;
7307 
7308 	IDN_GLOCK_EXCL();
7309 	/*
7310 	 * Verify dcpuset and dhw.dh_boardset don't
7311 	 * conflict with any existing DC member.
7312 	 */
7313 	b_conflicts = idn.dc_boardset & dp->dhw.dh_boardset;
7314 	CPUSET_ZERO(p_conflicts);
7315 	CPUSET_OR(p_conflicts, idn.dc_cpuset);
7316 	CPUSET_AND(p_conflicts, dp->dcpuset);
7317 
7318 	if (b_conflicts || !CPUSET_ISNULL(p_conflicts)) {
7319 		if (b_conflicts) {
7320 			cmn_err(CE_WARN,
7321 			    "IDN: 231: domain %d boardset "
7322 			    "(0x%x) conflicts with existing "
7323 			    "IDN boardset (0x%x)",
7324 			    domid, dp->dhw.dh_boardset,
7325 			    b_conflicts);
7326 		}
7327 		if (!CPUSET_ISNULL(p_conflicts)) {
7328 			cmn_err(CE_WARN,
7329 			    "IDN: 232: domain %d cpuset "
7330 			    "(0x%x.%0x) conflicts with existing "
7331 			    "IDN cpuset (0x%x.%0x)", domid,
7332 			    UPPER32_CPUMASK(dp->dcpuset),
7333 			    LOWER32_CPUMASK(dp->dcpuset),
7334 			    UPPER32_CPUMASK(p_conflicts),
7335 			    LOWER32_CPUMASK(p_conflicts));
7336 		}
7337 		IDN_GUNLOCK();
7338 		/*
7339 		 * Need to disconnect and not retry with this guy.
7340 		 */
7341 		IDN_DUNLOCK(domid);
7342 		IDN_SYNC_LOCK();
7343 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
7344 		IDN_DLOCK_EXCL(domid);
7345 
7346 		INIT_IDNKERR(&idnerr);
7347 		SET_IDNKERR_ERRNO(&idnerr, EPROTO);
7348 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CONFIG_FATAL);
7349 		SET_IDNKERR_PARAM0(&idnerr, domid);
7350 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
7351 
7352 		(void) idn_disconnect(domid, IDNFIN_FORCE_HARD,
7353 		    IDNFIN_ARG_CFGERR_FATAL, IDNFIN_SYNC_NO);
7354 		IDN_SYNC_UNLOCK();
7355 
7356 		return (-1);
7357 	}
7358 
7359 	idn_mainmbox_reset(domid, dp->dmbox.m_send);
7360 	idn_mainmbox_reset(domid, dp->dmbox.m_recv);
7361 
7362 #ifdef IDNBUG_CPUPERBOARD
7363 	/*
7364 	 * We only allow connections to domains whose (mem) boards
7365 	 * all have at least one cpu.  This is necessary so that
7366 	 * we can program the CICs of that respective board.  This
7367 	 * is primarily only a requirement if the remote domain
7368 	 * is the master _and_ has the SMR in that particular board.
7369 	 * To simplify the checking we simply restrict connections to
7370 	 * domains that have at least one cpu on all boards that
7371 	 * contain memory.
7372 	 */
7373 	if (!idn_cpu_per_board((void *)NULL, dp->dcpuset, &dp->dhw)) {
7374 		cmn_err(CE_WARN,
7375 		    "IDN: 233: domain %d missing CPU per "
7376 		    "memory boardset (0x%x), CPU boardset (0x%x)",
7377 		    domid, dp->dhw.dh_boardset,
7378 		    cpuset2boardset(dp->dcpuset));
7379 
7380 		IDN_GUNLOCK();
7381 		/*
7382 		 * Need to disconnect and not retry with this guy.
7383 		 */
7384 		IDN_DUNLOCK(domid);
7385 		IDN_SYNC_LOCK();
7386 		DOMAINSET_DEL(idn.domset.ds_relink, domid);
7387 		IDN_DLOCK_EXCL(domid);
7388 
7389 		INIT_IDNKERR(&idnerr);
7390 		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
7391 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_CPU_CONFIG);
7392 		SET_IDNKERR_PARAM0(&idnerr, domid);
7393 		idn_update_op(IDNOP_ERROR, DOMAINSET(domid), &idnerr);
7394 
7395 		(void) idn_disconnect(domid, IDNFIN_FORCE_HARD,
7396 		    IDNFIN_ARG_CPUCFG, IDNFIN_SYNC_NO);
7397 		IDN_SYNC_UNLOCK();
7398 
7399 		return (-1);
7400 	}
7401 #endif /* IDNBUG_CPUPERBOARD */
7402 
7403 	CPUSET_OR(idn.dc_cpuset, dp->dcpuset);
7404 	idn.dc_boardset |= dp->dhw.dh_boardset;
7405 
7406 	IDN_GUNLOCK();
7407 
7408 	/*
7409 	 * Set up the portmap for this domain.
7410 	 */
7411 	i = -1;
7412 	for (p = 0; p < NCPU; p++) {
7413 		BUMP_INDEX(dp->dcpuset, i);
7414 		dp->dcpumap[p] = (uchar_t)i;
7415 	}
7416 
7417 	/*
7418 	 * Got everything we need from the remote
7419 	 * domain, now we can program hardware as needed.
7420 	 */
7421 	if (idn_program_hardware(domid) != 0) {
7422 		domainset_t	domset;
7423 		/*
7424 		 * Yikes!  Failed to program hardware.
7425 		 * Gotta bail.
7426 		 */
7427 		cmn_err(CE_WARN,
7428 		    "IDN: 234: failed to program hardware for domain %d "
7429 		    "(boardset = 0x%x)",
7430 		    domid, dp->dhw.dh_boardset);
7431 
7432 		IDN_DUNLOCK(domid);
7433 		/*
7434 		 * If we're having problems programming our
7435 		 * hardware we better unlink completely from
7436 		 * the IDN before things get really bad.
7437 		 */
7438 		IDN_SYNC_LOCK();
7439 		IDN_GLOCK_EXCL();
7440 		IDN_GSTATE_TRANSITION(IDNGS_DISCONNECT);
7441 		domset = DOMAINSET_ALL;
7442 		DOMAINSET_DEL(domset, idn.localid);
7443 		IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
7444 		IDN_GUNLOCK();
7445 
7446 		INIT_IDNKERR(&idnerr);
7447 		SET_IDNKERR_ERRNO(&idnerr, EINVAL);
7448 		SET_IDNKERR_IDNERR(&idnerr, IDNKERR_HW_ERROR);
7449 		SET_IDNKERR_PARAM0(&idnerr, domid);
7450 		idn_update_op(IDNOP_ERROR, DOMAINSET_ALL, &idnerr);
7451 
7452 		idn_unlink_domainset(domset, IDNFIN_NORMAL, IDNFIN_ARG_HWERR,
7453 		    IDNFIN_OPT_UNLINK, BOARDSET_ALL);
7454 
7455 		IDN_SYNC_UNLOCK();
7456 		IDN_DLOCK_EXCL(domid);
7457 
7458 		return (-1);
7459 	}
7460 
7461 	/*
7462 	 * Now that hardware has been programmed we can
7463 	 * remap the SMR into our local space, if necessary.
7464 	 */
7465 	IDN_GLOCK_EXCL();
7466 	if (domid == IDN_GET_MASTERID()) {
7467 		/*
7468 		 * No need to worry about disabling the data
7469 		 * server since at this stage there is only
7470 		 * one and he doesn't go active until his
7471 		 * mailbox (dmbox.m_recv->mm_smr_mboxp) is set up.
7472 		 */
7473 		smr_remap(&kas, idn.smr.vaddr, idn.smr.rempfn, IDN_SMR_SIZE);
7474 	}
7475 	IDN_GUNLOCK();
7476 
7477 	/*
7478 	 * There is no need to ACK the CFG messages since remote
7479 	 * domain would not progress to the next state (CON_SENT)
7480 	 * unless he has received everything.
7481 	 */
7482 
7483 	dp->dcfgrcvdone = 1;
7484 	PR_PROTO("%s:%d: RECV config DONE\n", proc, domid);
7485 
7486 	if (dp->dcfgsnddone) {
7487 		idn_xdcargs_t	xargs;
7488 		/*
7489 		 * Well, we've received all that we were expecting,
7490 		 * but we don't know if the remote domain has
7491 		 * received all that it was expecting from us,
7492 		 * although we know we transferred everything
7493 		 * so let's get the show on the road.
7494 		 */
7495 		IDN_DUNLOCK(domid);
7496 		IDN_SYNC_LOCK();
7497 		IDN_DLOCK_EXCL(domid);
7498 		/*
7499 		 * If the state has changed from CONFIG
7500 		 * then somebody else has taken over
7501 		 * control of this domain so we can just
7502 		 * bail out.
7503 		 */
7504 		if (dp->dstate == IDNDS_CONFIG) {
7505 			dp->dxp = &xphase_con;
7506 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
7507 			bzero(xargs, sizeof (xargs));
7508 
7509 			(void) idn_xphase_transition(domid, NULL, xargs);
7510 		}
7511 		IDN_SYNC_UNLOCK();
7512 	}
7513 
7514 	return (0);
7515 }
7516 
7517 static int
idn_verify_config_mbox(int domid)7518 idn_verify_config_mbox(int domid)
7519 {
7520 	idn_domain_t	*ldp, *dp;
7521 	idn_mainmbox_t	*mmp;
7522 	idn_mboxtbl_t	*mtp;
7523 	int		c, rv = 0;
7524 	uint_t		activeptr, readyptr;
7525 	ushort_t	mbox_csum;
7526 
7527 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7528 
7529 	dp = &idn_domain[domid];
7530 	ldp = &idn_domain[idn.localid];
7531 
7532 	/*
7533 	 * The master will have assigned us the dmbox.m_tbl
7534 	 * from which we assign our receive mailboxes.
7535 	 * The first (0) entry contains the cookie used
7536 	 * for verification.
7537 	 */
7538 	IDN_DLOCK_SHARED(idn.localid);
7539 	/*
7540 	 * Now that we have an assigned mboxtbl from the
7541 	 * master, we can determine which receive mailbox
7542 	 * we indirectly assigned to him at the time we
7543 	 * sent him his MBOX_INDEX.  Prep it, however note
7544 	 * that the master will have not been able to
7545 	 * validate it because of the chicken 'n egg
7546 	 * problem between a master and slave.  Thus we
7547 	 * need to reset the cookie after the prep.
7548 	 */
7549 	mmp = dp->dmbox.m_recv;
7550 	mtp = IDN_MBOXTBL_PTR(ldp->dmbox.m_tbl, domid);
7551 	for (c = 0; c < IDN_MAX_NETS; c++) {
7552 		mutex_enter(&mmp[c].mm_mutex);
7553 		ASSERT(!mmp[c].mm_smr_mboxp);
7554 
7555 		mmp[c].mm_smr_mboxp = mtp;
7556 		mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
7557 		if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
7558 			cmn_err(CE_WARN,
7559 			    "IDN: 235: [recv] mailbox (domain %d, "
7560 			    "channel %d) SMR CORRUPTED - RELINK",
7561 			    domid, c);
7562 			cmn_err(CE_CONT,
7563 			    "IDN: 235: [recv] expected (cookie 0x%x, "
7564 			    "cksum 0x%x) actual (cookie 0x%x, "
7565 			    "cksum 0x%x)\n",
7566 			    IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
7567 			    (int)mtp->mt_header.mh_cksum,
7568 			    IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
7569 			    (int)mbox_csum);
7570 			mutex_exit(&mmp[c].mm_mutex);
7571 			rv = -1;
7572 			break;
7573 		}
7574 		activeptr = mtp->mt_header.mh_svr_active_ptr;
7575 		readyptr = mtp->mt_header.mh_svr_ready_ptr;
7576 		/*
7577 		 * Verify pointers are valid.
7578 		 */
7579 		if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
7580 		    !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
7581 			cmn_err(CE_WARN,
7582 			    "IDN: 235: [recv] mailbox (domain %d, "
7583 			    "channel %d) SMR CORRUPTED - RELINK",
7584 			    domid, c);
7585 			cmn_err(CE_CONT,
7586 			    "IDN: 235: [recv] activeptr (0x%x), "
7587 			    "readyptr (0x%x)\n",
7588 			    activeptr, readyptr);
7589 			mutex_exit(&mmp[c].mm_mutex);
7590 			rv = -1;
7591 			break;
7592 		}
7593 		mmp[c].mm_smr_activep =	(ushort_t *)IDN_OFFSET2ADDR(activeptr);
7594 		mmp[c].mm_smr_readyp =	(ushort_t *)IDN_OFFSET2ADDR(readyptr);
7595 		mutex_exit(&mmp[c].mm_mutex);
7596 		IDN_MBOXTBL_PTR_INC(mtp);
7597 	}
7598 
7599 	IDN_DUNLOCK(idn.localid);
7600 
7601 	if (rv)
7602 		return (rv);
7603 
7604 	/*
7605 	 * Now we need to translate SMR offsets for send mailboxes
7606 	 * to actual virtual addresses.
7607 	 */
7608 	mmp = dp->dmbox.m_send;
7609 	for (c = 0; c < IDN_MAX_NETS; mmp++, c++) {
7610 		mutex_enter(&mmp->mm_mutex);
7611 		if ((mtp = mmp->mm_smr_mboxp) == NULL) {
7612 			mutex_exit(&mmp->mm_mutex);
7613 			rv = -1;
7614 			break;
7615 		}
7616 
7617 		mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
7618 
7619 		if (!VALID_MBOXHDR(&mtp->mt_header, c, mbox_csum)) {
7620 			cmn_err(CE_WARN,
7621 			    "IDN: 235: [send] mailbox (domain %d, "
7622 			    "channel %d) SMR CORRUPTED - RELINK",
7623 			    domid, c);
7624 			cmn_err(CE_CONT,
7625 			    "IDN: 235: [send] expected (cookie 0x%x, "
7626 			    "cksum 0x%x) actual (cookie 0x%x, "
7627 			    "cksum 0x%x)\n",
7628 			    IDN_GET_MBOXHDR_COOKIE(&mtp->mt_header),
7629 			    (int)mtp->mt_header.mh_cksum,
7630 			    IDN_MAKE_MBOXHDR_COOKIE(0, 0, c),
7631 			    (int)mbox_csum);
7632 			mutex_exit(&mmp->mm_mutex);
7633 			rv = -1;
7634 			break;
7635 		}
7636 		activeptr = mtp->mt_header.mh_svr_active_ptr;
7637 		readyptr = mtp->mt_header.mh_svr_ready_ptr;
7638 		/*
7639 		 * Paranoid check.
7640 		 */
7641 		if (!activeptr || !VALID_NWROFFSET(activeptr, 2) ||
7642 		    !readyptr || !VALID_NWROFFSET(readyptr, 2)) {
7643 			cmn_err(CE_WARN,
7644 			    "IDN: 235: [send] mailbox (domain %d, "
7645 			    "channel %d) SMR CORRUPTED - RELINK",
7646 			    domid, c);
7647 			cmn_err(CE_CONT,
7648 			    "IDN: 235: [send] activeptr (0x%x), "
7649 			    "readyptr (0x%x)\n",
7650 			    activeptr, readyptr);
7651 			mutex_exit(&mmp->mm_mutex);
7652 			rv = -1;
7653 			break;
7654 		}
7655 		mmp->mm_smr_activep = (ushort_t *)IDN_OFFSET2ADDR(activeptr);
7656 		mmp->mm_smr_readyp = (ushort_t *)IDN_OFFSET2ADDR(readyptr);
7657 		idn_reset_mboxtbl(mtp);
7658 		mutex_exit(&mmp->mm_mutex);
7659 		IDN_MBOXTBL_PTR_INC(mtp);
7660 	}
7661 
7662 	return (rv);
7663 }
7664 
7665 /*
7666  * The BUFSIZEs between domains have to be equal so that slave buffers
7667  * and the master's slabpool are consistent.
7668  * The MTUs between domains have to be equal so they can transfer
7669  * packets consistently without possible data truncation.
7670  *
7671  * ZZZ - Perhaps these could be negotiated?
7672  */
7673 static int
valid_mtu(uint_t mtu)7674 valid_mtu(uint_t mtu)
7675 {
7676 	return ((mtu == idn_domain[idn.localid].dmtu) && mtu);
7677 }
7678 
7679 static int
valid_bufsize(uint_t bufsize)7680 valid_bufsize(uint_t bufsize)
7681 {
7682 	return ((bufsize == idn_domain[idn.localid].dbufsize) && bufsize);
7683 }
7684 
7685 static int
valid_slabsize(int slabsize)7686 valid_slabsize(int slabsize)
7687 {
7688 	return ((slabsize == idn_domain[idn.localid].dslabsize) && slabsize);
7689 }
7690 
7691 static int
valid_nwrsize(int nwrsize)7692 valid_nwrsize(int nwrsize)
7693 {
7694 	return ((nwrsize == idn_domain[idn.localid].dnwrsize) && nwrsize);
7695 }
7696 
7697 static int
idn_program_hardware(int domid)7698 idn_program_hardware(int domid)
7699 {
7700 	int		rv, is_master;
7701 	idn_domain_t	*dp;
7702 	uint_t		*mcadrp;
7703 	pfn_t		rem_pfn, rem_pfnlimit;
7704 	procname_t	proc = "idn_program_hardware";
7705 
7706 	PR_PROTO("%s:%d: program hw in domain %d w.r.t remote domain %d\n",
7707 	    proc, domid, idn.localid, domid);
7708 
7709 	dp = &idn_domain[domid];
7710 
7711 	ASSERT(domid != idn.localid);
7712 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7713 	ASSERT(dp->dstate == IDNDS_CONFIG);
7714 
7715 	IDN_GLOCK_EXCL();
7716 
7717 	if (DOMAIN_IN_SET(idn.domset.ds_hwlinked, domid)) {
7718 		IDN_GUNLOCK();
7719 		return (0);
7720 	}
7721 
7722 	DOMAINSET_ADD(idn.domset.ds_flush, domid);
7723 	CHECKPOINT_OPENED(IDNSB_CHKPT_CACHE, dp->dhw.dh_boardset, 1);
7724 
7725 	if (domid != IDN_GET_MASTERID()) {
7726 		/*
7727 		 * If the remote domain is a slave, then
7728 		 * all we have to program is the CIC sm_mask.
7729 		 */
7730 		is_master = 0;
7731 		if ((idn.localid == IDN_GET_MASTERID()) &&
7732 		    lock_try(&idn.first_hwlink)) {
7733 			/*
7734 			 * This is our first HW link and I'm the
7735 			 * master, which means we need to program
7736 			 * our local bar/lar.
7737 			 */
7738 			ASSERT(idn.first_hwmasterid == (short)IDN_NIL_DOMID);
7739 			idn.first_hwmasterid = (short)idn.localid;
7740 			rem_pfn = idn.smr.locpfn;
7741 			rem_pfnlimit = idn.smr.locpfn +
7742 			    btop(MB2B(IDN_SMR_SIZE));
7743 		} else {
7744 			/*
7745 			 * Otherwise, just a slave linking to
7746 			 * another slave.  No bar/lar updating
7747 			 * necessary.
7748 			 */
7749 			rem_pfn = rem_pfnlimit = PFN_INVALID;
7750 		}
7751 		mcadrp = NULL;
7752 	} else {
7753 		/*
7754 		 * If the remote domain is a master, then
7755 		 * we need to program the CIC sm_mask/sm_bar/sm_lar,
7756 		 * and PC's.
7757 		 */
7758 		is_master = 1;
7759 		rem_pfn = idn.smr.rempfn;
7760 		rem_pfnlimit = idn.smr.rempfnlim;
7761 		mcadrp = dp->dhw.dh_mcadr;
7762 		ASSERT(idn.first_hwmasterid == (short)IDN_NIL_DOMID);
7763 		idn.first_hwmasterid = (short)domid;
7764 	}
7765 
7766 	PR_PROTO("%s:%d: ADD bset (0x%x)\n", proc, domid, dp->dhw.dh_boardset);
7767 
7768 	rv = idnxf_shmem_add(is_master, dp->dhw.dh_boardset,
7769 	    rem_pfn, rem_pfnlimit, mcadrp);
7770 
7771 	if (rv == 0) {
7772 		DOMAINSET_ADD(idn.domset.ds_hwlinked, domid);
7773 	} else {
7774 		if (rem_pfn == idn.smr.locpfn)
7775 			lock_clear(&idn.first_hwlink);
7776 
7777 		if (idn.first_hwmasterid == (short)domid)
7778 			idn.first_hwmasterid = (short)IDN_NIL_DOMID;
7779 
7780 		(void) idnxf_shmem_sub(is_master, dp->dhw.dh_boardset);
7781 	}
7782 
7783 	IDN_GUNLOCK();
7784 
7785 	return (rv);
7786 }
7787 
7788 static int
idn_deprogram_hardware(int domid)7789 idn_deprogram_hardware(int domid)
7790 {
7791 	int		rv, is_master;
7792 	idn_domain_t	*dp;
7793 	procname_t	proc = "idn_deprogram_hardware";
7794 
7795 
7796 	dp = &idn_domain[domid];
7797 
7798 	ASSERT(domid != idn.localid);
7799 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7800 
7801 	/*
7802 	 * Need to take into consideration what boards remote
7803 	 * domain was connected to.  If we don't have a connection to
7804 	 * them ourself, then we better remove them now , otherwise
7805 	 * they'll never be removed (unless we link to them at some point).
7806 	 */
7807 #if 0
7808 	DEBUG_USECDELAY(500000);
7809 #endif /* 0 */
7810 
7811 	IDN_GLOCK_EXCL();
7812 
7813 	if (!DOMAIN_IN_SET(idn.domset.ds_hwlinked, domid)) {
7814 		IDN_GUNLOCK();
7815 		return (0);
7816 	}
7817 
7818 	PR_PROTO("%s:%d: DEprogram hw in domain %d w.r.t remote domain %d\n",
7819 	    proc, domid, idn.localid, domid);
7820 
7821 	/*
7822 	 * It's possible to come through this flow for domains that
7823 	 * have not been programmed, i.e. not in idn.hwlinked_domset,
7824 	 * so don't bother asserting that they might be in there.
7825 	 * This can occur if we lose a domain during the config/syn
7826 	 * sequence.  If this occurs we won't know whether the remote
7827 	 * domain has programmed its hardware or not.  If it has then
7828 	 * it will have to go through the DMAP sequence and thus we
7829 	 * have to go through it also.  So, if we reach at least the
7830 	 * CONFIG state, we need to go through the DMAP handshake.
7831 	 */
7832 
7833 	PR_PROTO("%s:%d: SUB bset (0x%x)\n", proc, domid, dp->dhw.dh_boardset);
7834 
7835 	if (idn.first_hwmasterid == (short)domid) {
7836 		is_master = 1;
7837 		idn.first_hwmasterid = (short)IDN_NIL_DOMID;
7838 	} else {
7839 		is_master = 0;
7840 	}
7841 	rv = idnxf_shmem_sub(is_master, dp->dhw.dh_boardset);
7842 
7843 	if (rv == 0)
7844 		DOMAINSET_DEL(idn.domset.ds_hwlinked, domid);
7845 
7846 	IDN_GUNLOCK();
7847 
7848 	return (rv);
7849 }
7850 
7851 /*
7852  * Remember can't send slabs back to master at this point.
7853  * Entered with write-drwlock held.
7854  * Returns with drwlock dropped.
7855  */
7856 static void
idn_deconfig(int domid)7857 idn_deconfig(int domid)
7858 {
7859 	idn_domain_t	*dp, *ldp;
7860 	smr_slab_t	*sp;
7861 	int		c, masterid;
7862 	procname_t	proc = "idn_deconfig";
7863 
7864 	ASSERT(IDN_SYNC_IS_LOCKED());
7865 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
7866 	ASSERT(domid != idn.localid);
7867 
7868 	ldp = &idn_domain[idn.localid];
7869 	dp = &idn_domain[domid];
7870 
7871 	ASSERT(dp->dstate == IDNDS_DMAP);
7872 
7873 	PR_PROTO("%s:%d: (dio=%d, dioerr=%d, dnslabs=%d)\n",
7874 	    proc, domid, dp->dio, dp->dioerr, dp->dnslabs);
7875 
7876 	IDN_GLOCK_EXCL();
7877 	masterid = IDN_GET_MASTERID();
7878 
7879 	idn.dc_boardset &= ~dp->dhw.dh_boardset;
7880 	for (c = 0; c < NCPU; c++) {
7881 		if (CPU_IN_SET(dp->dcpuset, c)) {
7882 			CPUSET_DEL(idn.dc_cpuset, c);
7883 		}
7884 	}
7885 
7886 	IDN_GUNLOCK();
7887 
7888 	(void) smr_buf_free_all(domid);
7889 
7890 	if (idn.localid == masterid) {
7891 		/*
7892 		 * Since I'm the master there may
7893 		 * have been slabs in this domain's
7894 		 * idn_domain[] entry.
7895 		 */
7896 		DSLAB_LOCK_EXCL(domid);
7897 		if ((sp = dp->dslab) != NULL) {
7898 			PR_PROTO("%s:%d: freeing up %d dead slabs\n",
7899 			    proc, domid, dp->dnslabs);
7900 			smr_slab_free(domid, sp);
7901 			dp->dslab = NULL;
7902 			dp->dnslabs = 0;
7903 			dp->dslab_state = DSLAB_STATE_UNKNOWN;
7904 		}
7905 		DSLAB_UNLOCK(domid);
7906 	} else if (domid == masterid) {
7907 		/*
7908 		 * We're shutting down the master!
7909 		 * We need to blow away our local slab
7910 		 * data structures.
7911 		 * Since I'm not the master, there should
7912 		 * be no slab structures in the given
7913 		 * domain's idn_domain[] entry.  They should
7914 		 * only exist in the local domain's entry.
7915 		 */
7916 		DSLAB_LOCK_EXCL(idn.localid);
7917 		ASSERT(dp->dslab == NULL);
7918 #ifdef DEBUG
7919 		{
7920 			int	nbusy = 0;
7921 			uint_t	dommask = 0;
7922 			for (sp = ldp->dslab; sp; sp = sp->sl_next) {
7923 				smr_slabbuf_t *bp;
7924 
7925 				if (!smr_slab_busy(sp))
7926 					continue;
7927 				nbusy++;
7928 				for (bp = sp->sl_inuse; bp; bp = bp->sb_next)
7929 					if (bp->sb_domid != IDN_NIL_DOMID)
7930 						DOMAINSET_ADD(dommask,
7931 						    bp->sb_domid);
7932 			}
7933 			if (nbusy)
7934 				PR_PROTO("%s:%d: found %d busy slabs "
7935 				    "(dommask = 0x%x)\n",
7936 				    proc, domid, nbusy, dommask);
7937 		}
7938 #endif /* DEBUG */
7939 		if ((sp = ldp->dslab) != NULL) {
7940 			PR_PROTO("%s:%d: freeing up %d local slab "
7941 			    "structs\n", proc, domid, ldp->dnslabs);
7942 			smr_slab_garbage_collection(sp);
7943 			ldp->dslab = NULL;
7944 			ldp->dnslabs = 0;
7945 			ldp->dslab_state = DSLAB_STATE_UNKNOWN;
7946 		}
7947 		DSLAB_UNLOCK(idn.localid);
7948 	}
7949 	if (dp->dio) {
7950 		PR_PROTO("%s:%d: reset dio (%d) to 0\n", proc, domid, dp->dio);
7951 		dp->dio = 0;
7952 	}
7953 	dp->dioerr = 0;
7954 
7955 	PR_PROTO("%s:%d: reset diocheck (%x) to 0\n",
7956 	    proc, domid, dp->diocheck);
7957 	lock_clear(&dp->diocheck);
7958 
7959 	CHECKPOINT_CLOSED(IDNSB_CHKPT_LINK, dp->dhw.dh_boardset, 2);
7960 
7961 	/*
7962 	 * Should have already flush our memory before
7963 	 * reaching this stage.  The issue is that by the
7964 	 * time we reach here the remote domains may have
7965 	 * already reprogrammed their hardware and so flushing
7966 	 * out caches now could result in a arbstop/hang
7967 	 * if we have data that needs to go back to one
7968 	 * of the remote domains that has already reprogrammed
7969 	 * its hardware.
7970 	 */
7971 	ASSERT(!DOMAIN_IN_SET(idn.domset.ds_flush, domid));
7972 
7973 	(void) idn_deprogram_hardware(domid);
7974 	/*
7975 	 * XXX - what to do if we
7976 	 *	 fail to program hardware
7977 	 *	 probably should panic since
7978 	 *	 demise of system may be near?
7979 	 *	 Sufficient to just shutdown network?
7980 	 */
7981 
7982 	IDN_DSTATE_TRANSITION(dp, IDNDS_CLOSED);
7983 
7984 	idn_close_domain(domid);
7985 }
7986 
7987 /*
7988  * If we're sending a Reset we better make sure we don't have any
7989  * references or traffic headed in the direction of this guy, since
7990  * when he receives the reset, he'll start shutting down which means
7991  * we effectively have to shutdown _before_ sending the reset.
7992  * DO NOT HOLD ANY DOMAIN RWLOCKS ON ENTRY.  Could result in deadlock
7993  * due to channel server looping back through STREAMs and attempting
7994  * to acquire domain lock, i.e. channel server will never "stop".
7995  */
7996 static void
idn_shutdown_datapath(domainset_t domset,int force)7997 idn_shutdown_datapath(domainset_t domset, int force)
7998 {
7999 	int		do_allchan;
8000 	idn_domain_t	*dp;
8001 	register int	d;
8002 	procname_t	proc = "idn_shutdown_datapath";
8003 
8004 
8005 	PR_CHAN("%s: domset = 0x%x\n", proc, (uint_t)domset);
8006 
8007 	do_allchan = (domset == DOMAINSET_ALL) ? 1 : 0;
8008 
8009 	DOMAINSET_DEL(domset, idn.localid);
8010 
8011 	if (do_allchan) {
8012 		/*
8013 		 * Need to stop all outgoing and
8014 		 * incoming SMR references.
8015 		 */
8016 		idn_deactivate_channel(CHANSET_ALL, IDNCHAN_OFFLINE);
8017 	}
8018 
8019 	/*
8020 	 * If force is set then we don't want to reference
8021 	 * the SMR at all, so deactivate the domains from
8022 	 * channels first.  This will result in the mainmbox-flush
8023 	 * routines to just clean up without referencing the
8024 	 * SMR space.
8025 	 */
8026 	if (force)
8027 		idn_mainmbox_deactivate(domset);
8028 
8029 	/*
8030 	 * Flush out mailboxes (clear smr reference).
8031 	 */
8032 	for (d = 0; d < MAX_DOMAINS; d++) {
8033 		if (!DOMAIN_IN_SET(domset, d))
8034 			continue;
8035 
8036 		dp = &idn_domain[d];
8037 		if ((dp->dmbox.m_send == NULL) && (dp->dmbox.m_recv == NULL))
8038 			continue;
8039 
8040 		IDN_MBOX_LOCK(d);
8041 		if (dp->dmbox.m_send)
8042 			(void) idn_mainmbox_flush(d, dp->dmbox.m_send);
8043 		if (dp->dmbox.m_recv)
8044 			(void) idn_mainmbox_flush(d, dp->dmbox.m_recv);
8045 		IDN_MBOX_UNLOCK(d);
8046 	}
8047 	/*
8048 	 * Deactivate all domain references also.
8049 	 * Only necessary if it wasn't already done above.
8050 	 */
8051 	if (!force)
8052 		idn_mainmbox_deactivate(domset);
8053 }
8054 
8055 void
idn_send_cmd(int domid,idn_cmd_t cmdtype,uint_t arg1,uint_t arg2,uint_t arg3)8056 idn_send_cmd(int domid, idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t
8057     arg3)
8058 {
8059 	idn_msgtype_t	mt;
8060 	procname_t	proc = "idn_send_cmd";
8061 
8062 	mt.mt_mtype = IDNP_CMD;
8063 	mt.mt_atype = 0;
8064 	mt.mt_cookie = 0;
8065 
8066 	ASSERT(IDN_DLOCK_IS_HELD(domid));
8067 
8068 	PR_PROTO("%s:%d: sending command %s\n", proc, domid,
8069 	    VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown");
8070 
8071 	IDN_MSGTIMER_START(domid, IDNP_CMD, (ushort_t)cmdtype,
8072 	    idn_msg_waittime[IDNP_CMD], &mt.mt_cookie);
8073 
8074 	IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, arg3);
8075 }
8076 
8077 void
idn_send_cmdresp(int domid,idn_msgtype_t * mtp,idn_cmd_t cmdtype,uint_t arg1,uint_t arg2,uint_t cerrno)8078 idn_send_cmdresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype, uint_t arg1,
8079     uint_t arg2, uint_t cerrno)
8080 {
8081 	idn_msgtype_t	mt;
8082 
8083 	ASSERT(IDN_DLOCK_IS_HELD(domid));
8084 
8085 	if (domid == idn.localid) {
8086 		/*
8087 		 * It's possible local domain received a command
8088 		 * from itself.  However, we cannot send a normal
8089 		 * "ack" response (XDC) to ourself.
8090 		 */
8091 		return;
8092 	}
8093 
8094 	mt.mt_mtype = IDNP_CMD | IDNP_ACK;
8095 	mt.mt_atype = 0;
8096 	mt.mt_cookie = mtp->mt_cookie;
8097 
8098 	IDNXDC(domid, &mt, (uint_t)cmdtype, arg1, arg2, cerrno);
8099 }
8100 
8101 static void
idn_send_cmd_nackresp(int domid,idn_msgtype_t * mtp,idn_cmd_t cmdtype,idn_nack_t nacktype)8102 idn_send_cmd_nackresp(int domid, idn_msgtype_t *mtp, idn_cmd_t cmdtype,
8103     idn_nack_t nacktype)
8104 {
8105 	idn_msgtype_t	mt;
8106 
8107 	if (domid == idn.localid)
8108 		return;
8109 
8110 	mt.mt_mtype = IDNP_CMD | IDNP_NACK;
8111 	mt.mt_atype = 0;
8112 	mt.mt_cookie = mtp->mt_cookie;
8113 
8114 	(void) IDNXDC(domid, &mt, (uint_t)cmdtype, (uint_t)nacktype, 0, 0);
8115 }
8116 
8117 void
idn_broadcast_cmd(idn_cmd_t cmdtype,uint_t arg1,uint_t arg2,uint_t arg3)8118 idn_broadcast_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
8119 {
8120 	idn_msgtype_t	mt;
8121 	domainset_t	domset;
8122 	procname_t	proc = "idn_broadcast_cmd";
8123 
8124 	IDN_GLOCK_SHARED();
8125 
8126 	domset = idn.domset.ds_connected;
8127 	DOMAINSET_DEL(domset, idn.localid);
8128 
8129 	PR_PROTO("%s: broadcasting command (%s) to domainset 0x%x\n",
8130 	    proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
8131 	    domset);
8132 
8133 	mt.mt_mtype = IDNP_CMD;
8134 	mt.mt_atype = 0;
8135 	mt.mt_cookie = 0;
8136 
8137 	IDNXDC_BROADCAST(domset, &mt, (uint_t)cmdtype, arg1, arg2, arg3);
8138 
8139 	IDN_GUNLOCK();
8140 	/*
8141 	 * This is a broadcast which means local domain needs
8142 	 * to process it also.  Since we can't XDC to ourselves
8143 	 * we simply call a local function.
8144 	 */
8145 	idn_local_cmd(cmdtype, arg1, arg2, arg3);
8146 }
8147 
8148 /*
8149  * Since xargs[0] contains the cmdtype, only xargs[1], xargs[2], xargs[3]
8150  * are valid possible response arguments.
8151  */
8152 static void
idn_recv_cmd(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)8153 idn_recv_cmd(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
8154 {
8155 	uint_t			msg = mtp->mt_mtype;
8156 	register idn_domain_t	*dp;
8157 	idn_cmd_t		cmdtype;
8158 	uint_t			acknack;
8159 	uint_t			cmdarg1, cmdarg2, cmdarg3;
8160 	int			islocal;
8161 	int			unsup_cmd_sent, unsup_cmd_recvd;
8162 	procname_t		proc = "idn_recv_cmd";
8163 
8164 	acknack = msg & IDNP_ACKNACK_MASK;
8165 	GET_XARGS(xargs, &cmdtype, &cmdarg1, &cmdarg2, &cmdarg3);
8166 
8167 	dp = &idn_domain[domid];
8168 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8169 
8170 	IDN_GLOCK_SHARED();
8171 
8172 	islocal = (domid == idn.localid);
8173 
8174 	ASSERT(!acknack || (acknack & IDNP_ACKNACK_MASK));
8175 
8176 	PR_PROTO("%s:%d: (local=%d) acknack=0x%x, cmdtype=%s(%d), "
8177 	    "a1=0x%x, a2=0x%x, a3=0x%x\n",
8178 	    proc, domid, islocal, acknack,
8179 	    VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
8180 	    cmdtype, cmdarg1, cmdarg2, cmdarg3);
8181 
8182 	unsup_cmd_sent = unsup_cmd_recvd = 0;
8183 
8184 	if ((IDN_GET_MASTERID() == IDN_NIL_DOMID) ||
8185 	    (dp->dstate != IDNDS_CONNECTED)) {
8186 		/*
8187 		 * Commands cannot be handled without a valid
8188 		 * master.  If this is a request then nack him.
8189 		 */
8190 		PR_PROTO("%s:%d: cannot process CMD w/o master (%d, %s)\n",
8191 		    proc, domid, IDN_GET_MASTERID(),
8192 		    idnds_str[dp->dstate]);
8193 
8194 		if (!islocal && !(acknack & IDNP_ACKNACK_MASK))
8195 			idn_send_cmd_nackresp(domid, mtp, cmdtype,
8196 			    IDNNACK_NOCONN);
8197 		IDN_GUNLOCK();
8198 		return;
8199 	}
8200 	IDN_GUNLOCK();
8201 
8202 	if (acknack & IDNP_ACKNACK_MASK) {
8203 		idn_nack_t	nack;
8204 		/*
8205 		 * Receiving a cmd+ack or cmd+nack in response to some
8206 		 * earlier command we must have issued.
8207 		 * If the response is a nack, there are two possibilites:
8208 		 *
8209 		 *	1. Remote domain failed to allocate due
8210 		 *	   to limited resources.
8211 		 *
8212 		 *	2. Remote domain does not support this
8213 		 *	   particular command.
8214 		 *
8215 		 * In the case of #2, the argument immediately after
8216 		 * the cmdtype (xargs[1]) will be (-1).
8217 		 */
8218 		nack = (idn_nack_t)cmdarg1;
8219 		if ((acknack & IDNP_NACK) && (nack == IDNNACK_BADCMD))
8220 			unsup_cmd_sent++;
8221 
8222 		if (islocal) {
8223 			/*
8224 			 * Shouldn't be receiving local commands w/acks.
8225 			 */
8226 			cmdtype = (idn_cmd_t)0;
8227 		}
8228 
8229 		switch (cmdtype) {
8230 		case IDNCMD_SLABALLOC:
8231 			idn_recv_slaballoc_resp(domid, cmdarg1, cmdarg2,
8232 			    cmdarg3);
8233 			break;
8234 
8235 		case IDNCMD_SLABFREE:
8236 			idn_recv_slabfree_resp(domid, cmdarg1, cmdarg2,
8237 			    cmdarg3);
8238 			break;
8239 
8240 		case IDNCMD_SLABREAP:
8241 			/*
8242 			 * We only care if successful.
8243 			 */
8244 			if (acknack & IDNP_ACK)
8245 				idn_recv_slabreap_resp(domid, cmdarg1, cmdarg3);
8246 			break;
8247 
8248 		case IDNCMD_NODENAME:
8249 			if ((acknack & IDNP_NACK) == 0) {
8250 				idn_recv_nodename_resp(domid, cmdarg1, cmdarg3);
8251 				break;
8252 			}
8253 			switch (nack) {
8254 			case IDNNACK_NOCONN:
8255 			case IDNNACK_RETRY:
8256 				/*
8257 				 * Remote domain was not quite
8258 				 * ready, try again.
8259 				 */
8260 				PR_PROTO("%s:%d: remote not ready "
8261 				    "for %s - retrying "
8262 				    "[dstate=%s]\n",
8263 				    proc, domid,
8264 				    idncmd_str[IDNCMD_NODENAME],
8265 				    idnds_str[dp->dstate]);
8266 
8267 				if (dp->dstate == IDNDS_CONNECTED)
8268 					(void) timeout(idn_retry_nodename_req,
8269 					    (void *)(uintptr_t)domid, hz);
8270 			default:
8271 				break;
8272 			}
8273 			break;
8274 
8275 		default:
8276 			/*
8277 			 * Unsupported command.
8278 			 */
8279 			unsup_cmd_recvd++;
8280 			break;
8281 		}
8282 		if (unsup_cmd_sent) {
8283 			PR_PROTO("%s:%d: unsupported command "
8284 			    "requested (0x%x)\n",
8285 			    proc, domid, cmdtype);
8286 		}
8287 		if (unsup_cmd_recvd) {
8288 			PR_PROTO("%s:%d: unsupported command "
8289 			    "response (0x%x)\n",
8290 			    proc, domid, cmdtype);
8291 		}
8292 	} else {
8293 		/*
8294 		 * Receiving a regular cmd from a remote domain.
8295 		 */
8296 		switch (cmdtype) {
8297 		case IDNCMD_SLABALLOC:
8298 			idn_recv_slaballoc_req(domid, mtp, cmdarg1);
8299 			break;
8300 
8301 		case IDNCMD_SLABFREE:
8302 			idn_recv_slabfree_req(domid, mtp, cmdarg1, cmdarg2);
8303 			break;
8304 
8305 		case IDNCMD_SLABREAP:
8306 			idn_recv_slabreap_req(domid, mtp, cmdarg1);
8307 			break;
8308 
8309 		case IDNCMD_NODENAME:
8310 			idn_recv_nodename_req(domid, mtp, cmdarg1);
8311 			break;
8312 
8313 		default:
8314 			/*
8315 			 * Unsupported command.
8316 			 */
8317 			unsup_cmd_recvd++;
8318 			break;
8319 		}
8320 		if (!islocal && unsup_cmd_recvd) {
8321 			/*
8322 			 * Received an unsupported IDN command.
8323 			 */
8324 			idn_send_cmd_nackresp(domid, mtp, cmdtype,
8325 			    IDNNACK_BADCMD);
8326 		}
8327 	}
8328 }
8329 
8330 /*
8331  * This is a supporting routine for idn_broadcast_cmd() to
8332  * handle processing of the requested command for the local
8333  * domain.  Currently the only support broadcast command
8334  * supported is reaping.
8335  */
8336 /*ARGSUSED2*/
8337 static void
idn_local_cmd(idn_cmd_t cmdtype,uint_t arg1,uint_t arg2,uint_t arg3)8338 idn_local_cmd(idn_cmd_t cmdtype, uint_t arg1, uint_t arg2, uint_t arg3)
8339 {
8340 	idn_protojob_t	*jp;
8341 	idn_domain_t	*ldp = &idn_domain[idn.localid];
8342 	procname_t	proc = "idn_local_cmd";
8343 
8344 	PR_PROTO("%s: submitting local command %s on domain %d\n",
8345 	    proc, VALID_IDNCMD(cmdtype) ? idncmd_str[cmdtype] : "unknown",
8346 	    idn.localid);
8347 
8348 
8349 	jp = idn_protojob_alloc(KM_SLEEP);
8350 
8351 	jp->j_msg.m_domid    = ldp->domid;
8352 	jp->j_msg.m_msgtype  = IDNP_CMD;
8353 	jp->j_msg.m_cookie   = ldp->dcookie_recv;
8354 	SET_XARGS(jp->j_msg.m_xargs, cmdtype, arg1, arg2, arg3);
8355 
8356 	idn_protojob_submit(ldp->domid, jp);
8357 }
8358 
8359 /*
8360  * Terminate any outstanding commands that may have
8361  * been targeted for the given domain.  A command is
8362  * designated as outstanding if it has an active timer.
8363  *
8364  * serrno = ECANCELED.
8365  */
8366 static void
idn_terminate_cmd(int domid,int serrno)8367 idn_terminate_cmd(int domid, int serrno)
8368 {
8369 	idn_domain_t	*dp;
8370 	idn_timer_t	*tplist = NULL, *tp;
8371 	procname_t	proc = "idn_terminate_cmd";
8372 
8373 	dp = &idn_domain[domid];
8374 
8375 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8376 
8377 	IDN_MSGTIMER_GET(dp, IDNP_CMD, tplist, 0);
8378 	/*
8379 	 * At this point the timers are effectively terminated
8380 	 * since when they're t_onq indication is set false.
8381 	 */
8382 	if (tplist == NULL) {
8383 		PR_PROTO("%s:%d: no outstanding cmds found\n",
8384 		    proc, domid);
8385 		/*
8386 		 * There is a window where we may have caught a
8387 		 * request just prior to issuing the actual
8388 		 * command (SLABALLOC).  We're guaranteed if there
8389 		 * was, then he will have at least registered.
8390 		 * So, if we abort the command now, he'll catch
8391 		 * it before going to sleep.
8392 		 * Drop through.
8393 		 */
8394 	}
8395 	ASSERT(tplist ? (tplist->t_back->t_forw == NULL) : 1);
8396 
8397 	for (tp = tplist; tp; tp = tp->t_forw) {
8398 		ASSERT(tp->t_type == IDNP_CMD);
8399 
8400 		PR_PROTO("%s:%d: found outstanding cmd: %s\n",
8401 		    proc, domid, idncmd_str[tp->t_subtype]);
8402 
8403 		switch (tp->t_subtype) {
8404 		case IDNCMD_SLABALLOC:
8405 			/*
8406 			 * Outstanding slaballoc request may have
8407 			 * slab waiters hanging around.  Need to
8408 			 * tell them to bail out.  The given domain
8409 			 * must be the master if we have an outstanding
8410 			 * command to him.  This also presumes that
8411 			 * if there are any waiters they're only in
8412 			 * the local domain's waiting area (i.e. we're
8413 			 * a slave).
8414 			 */
8415 #ifdef DEBUG
8416 			IDN_GLOCK_SHARED();
8417 			ASSERT(domid == IDN_GET_MASTERID());
8418 			ASSERT(idn.localid != IDN_GET_MASTERID());
8419 			IDN_GUNLOCK();
8420 #endif /* DEBUG */
8421 			(void) smr_slabwaiter_abort(idn.localid, serrno);
8422 			break;
8423 
8424 		case IDNCMD_SLABFREE:
8425 		case IDNCMD_SLABREAP:
8426 		case IDNCMD_NODENAME:
8427 			/*
8428 			 * Nothing really waiting for these operations
8429 			 * so no biggy if we just drop.
8430 			 * Note that NODENAME may have an outstanding
8431 			 * buffer, however that will be reclaimed
8432 			 * when we actually unlink from domain.
8433 			 */
8434 			break;
8435 
8436 		default:
8437 			ASSERT(0);
8438 			break;
8439 		}
8440 	}
8441 	/*
8442 	 * As mentioned before the timers are effectively no-op'd
8443 	 * once they're dequeued, however let's cleanup house and
8444 	 * get rid of the useless entries in the timeout queue.
8445 	 */
8446 	if (tplist) {
8447 		IDN_TIMER_STOPALL(tplist);
8448 	}
8449 
8450 	if (idn_domain[idn.localid].dvote.v.master) {
8451 		/*
8452 		 * I'm the master so it's possible I had
8453 		 * outstanding commands (SLABALLOC) waiting
8454 		 * to be satisfied for the given domain.
8455 		 * Since we're forcing an error it's okay
8456 		 * to continue holding onto the drwlock.
8457 		 */
8458 		PR_PROTO("%s:%d: abort slaballoc waiters\n", proc, domid);
8459 		(void) smr_slabwaiter_abort(domid, serrno);
8460 
8461 	} else if (dp->dvote.v.master) {
8462 		PR_PROTO("%s:%d: abort (local domain) slaballoc waiters\n",
8463 		    proc, domid);
8464 		(void) smr_slabwaiter_abort(idn.localid, serrno);
8465 	}
8466 }
8467 
8468 static void
idn_send_acknack(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)8469 idn_send_acknack(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
8470 {
8471 	idn_domain_t	*dp = &idn_domain[domid];
8472 	procname_t	proc = "idn_send_acknack";
8473 
8474 	ASSERT(mtp ? (mtp->mt_mtype & IDNP_ACKNACK_MASK) : 1);
8475 	ASSERT(domid != IDN_NIL_DOMID);
8476 
8477 #ifdef DEBUG
8478 	{
8479 		STRING(mstr);
8480 		STRING(astr);
8481 
8482 		INUM2STR(mtp->mt_mtype, mstr);
8483 		INUM2STR(mtp->mt_atype, astr);
8484 
8485 		if (mtp->mt_mtype & IDNP_ACK) {
8486 			PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
8487 			    "a1=0x%x, a2=0x%x, a3=0x%x, a4 = 0x%x\n",
8488 			    proc, domid, idnds_str[dp->dstate],
8489 			    astr, mstr, xargs[0], xargs[1],
8490 			    xargs[2], xargs[3]);
8491 		} else {
8492 			idn_nack_t	nack;
8493 
8494 			nack = GET_XARGS_NACK_TYPE(xargs);
8495 			PR_PROTO("%s:%d: dstate=%s, msg=(%s/%s), "
8496 			    "nack=%s(0x%x)\n",
8497 			    proc, domid, idnds_str[dp->dstate],
8498 			    astr, mstr, idnnack_str[nack],
8499 			    (uint_t)nack);
8500 		}
8501 	}
8502 #endif /* DEBUG */
8503 
8504 	(void) IDNXDC(domid, mtp, xargs[0], xargs[1], xargs[2], xargs[3]);
8505 }
8506 
8507 /*ARGSUSED0*/
8508 static void
idn_prealloc_slab(int nslabs)8509 idn_prealloc_slab(int nslabs)
8510 {
8511 	register int	s, serrno;
8512 	smr_slab_t	*sp;
8513 	idn_domain_t	*ldp = &idn_domain[idn.localid];
8514 	procname_t	proc = "idn_prealloc_slab";
8515 
8516 	IDN_GLOCK_SHARED();
8517 	DSLAB_LOCK_SHARED(idn.localid);
8518 	if ((idn.state != IDNGS_ONLINE) || (ldp->dnslabs > 0)) {
8519 		/*
8520 		 * Not in the proper state or slab already allocated.
8521 		 */
8522 		DSLAB_UNLOCK(idn.localid);
8523 		IDN_GUNLOCK();
8524 		return;
8525 	}
8526 	IDN_GUNLOCK();
8527 	ASSERT(!ldp->dslab);
8528 
8529 	serrno = 0;
8530 	for (s = 0; (s < nslabs) && ((int)ldp->dnslabs < nslabs); s++) {
8531 		/*
8532 		 * Returns with ldp->drwlock dropped.
8533 		 */
8534 		serrno = smr_slab_alloc(idn.localid, &sp);
8535 		if (serrno != 0) {
8536 			PR_PROTO("%s: FAILED to pre-alloc'd "
8537 			    "slab (serrno = %d)\n", proc, serrno);
8538 			break;
8539 		}
8540 		/*
8541 		 * State may have changed since smr_slab_alloc
8542 		 * temporarily drops drwlock.  Make sure we're
8543 		 * still connected.
8544 		 */
8545 		PR_PROTO("%s: SUCCESSFULLY pre-alloc'd slab\n", proc);
8546 
8547 		if (idn.state != IDNGS_ONLINE) {
8548 			PR_PROTO("%s: Lost connection..leaving\n", proc);
8549 			break;
8550 		}
8551 	}
8552 
8553 	DSLAB_UNLOCK(idn.localid);
8554 }
8555 
8556 /*
8557  * Received a request from a remote domain to
8558  * allocate a slab from the master SMR for him.
8559  * Allocate slab and return the response.
8560  */
8561 static void
idn_recv_slaballoc_req(int domid,idn_msgtype_t * mtp,uint_t slab_size)8562 idn_recv_slaballoc_req(int domid, idn_msgtype_t *mtp, uint_t slab_size)
8563 {
8564 	register idn_domain_t	*dp;
8565 	procname_t		proc = "idn_recv_slaballoc_req";
8566 
8567 	PR_PROTO("%s: slaballoc req from domain %d (size=0x%x)\n",
8568 	    proc, domid, slab_size);
8569 
8570 	dp = &idn_domain[domid];
8571 
8572 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8573 
8574 	IDN_GLOCK_SHARED();
8575 
8576 	if (idn.localid != IDN_GET_MASTERID()) {
8577 		IDN_GUNLOCK();
8578 		/*
8579 		 * It's a fatal error if the remote domain thinks
8580 		 * we're the master.
8581 		 */
8582 		idn_send_slaballoc_resp(domid, mtp, 0, 0, EACCES);
8583 
8584 	} else if (dp->dstate != IDNDS_CONNECTED) {
8585 
8586 		IDN_GUNLOCK();
8587 		/*
8588 		 * It's a fatal error if we don't yet have a
8589 		 * connection established with the requestor.
8590 		 */
8591 		idn_send_slaballoc_resp(domid, mtp, 0, 0, ENOLINK);
8592 	} else {
8593 		int		serrno;
8594 		smr_slab_t	*sp;
8595 		smr_offset_t	slab_offset;
8596 
8597 		IDN_GUNLOCK();
8598 		DSLAB_LOCK_SHARED(domid);
8599 		IDN_DUNLOCK(domid);
8600 		/*
8601 		 * We're connected and we're the master.
8602 		 * smr_slab_alloc() returns with dp->drwlock dropped.
8603 		 */
8604 		if ((serrno = smr_slab_alloc(domid, &sp)) == 0) {
8605 			/*
8606 			 * Successfully allocated slab for remote slave.
8607 			 */
8608 			slab_offset = IDN_ADDR2OFFSET(sp->sl_start);
8609 			slab_size   = sp->sl_end - sp->sl_start;
8610 			ASSERT((slab_offset != 0) && (slab_size != 0));
8611 		} else {
8612 			slab_offset = slab_size = 0;
8613 		}
8614 		DSLAB_UNLOCK(domid);
8615 		/*
8616 		 * The drwlock is dropped during smr_slab_alloc.
8617 		 * During that time our connection with the given
8618 		 * domain may have changed.  Better check again.
8619 		 */
8620 		IDN_DLOCK_SHARED(domid);
8621 		if ((dp->dstate != IDNDS_CONNECTED) && !serrno) {
8622 			/*
8623 			 * Connection broke.  Keep the slab here.
8624 			 */
8625 			DSLAB_LOCK_EXCL(domid);
8626 			IDN_DUNLOCK(domid);
8627 			smr_slab_free(domid, sp);
8628 			DSLAB_UNLOCK(domid);
8629 			slab_offset = slab_size = 0;
8630 			serrno = ECANCELED;
8631 			IDN_DLOCK_SHARED(domid);
8632 		}
8633 		/*
8634 		 * Send response.
8635 		 * Note that smr_slab_alloc automatically installs
8636 		 * slab into domains respective idn_domain entry
8637 		 * to be associated with that domain.
8638 		 */
8639 		idn_send_slaballoc_resp(domid, mtp, slab_offset, slab_size,
8640 		    serrno);
8641 	}
8642 }
8643 
8644 static void
idn_send_slaballoc_resp(int domid,idn_msgtype_t * mtp,smr_offset_t slab_offset,uint_t slab_size,int serrno)8645 idn_send_slaballoc_resp(int domid, idn_msgtype_t *mtp, smr_offset_t slab_offset,
8646     uint_t slab_size, int serrno)
8647 {
8648 	procname_t	proc = "idn_send_slaballoc_resp";
8649 
8650 	PR_PROTO("%s: slaballoc resp to domain %d (off=0x%x, size=0x%x) "
8651 	    "[serrno = %d]\n",
8652 	    proc, domid, slab_offset, slab_size, serrno);
8653 
8654 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABALLOC, slab_offset, slab_size,
8655 	    serrno);
8656 }
8657 
8658 /*
8659  * Received the ack or nack to a previous allocation request
8660  * made by the local domain to the master for a slab.  Need
8661  * to "put" the response into the waiting area for any
8662  * waiters.
8663  */
8664 static void
idn_recv_slaballoc_resp(int domid,smr_offset_t slab_offset,uint_t slab_size,int serrno)8665 idn_recv_slaballoc_resp(int domid, smr_offset_t slab_offset, uint_t slab_size,
8666     int serrno)
8667 {
8668 	smr_slab_t		*sp = NULL;
8669 	int			rv;
8670 	procname_t		proc = "idn_recv_slaballoc_resp";
8671 
8672 
8673 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8674 
8675 	PR_PROTO("%s: slaballoc resp from domain %d (off=0x%x, size=0x%x) "
8676 	    "[serrno = %d]\n",
8677 	    proc, domid, slab_offset, slab_size, serrno);
8678 
8679 	if (!serrno) {
8680 		IDN_GLOCK_SHARED();
8681 		if (domid != IDN_GET_MASTERID()) {
8682 			/*
8683 			 * We should only be receiving responses from
8684 			 * our master.  This is either a bogus message
8685 			 * or an old response.  In either case dump it.
8686 			 */
8687 			PR_PROTO("%s: BOGUS slaballoc resp from domid %d "
8688 			    "(master = %d)\n",
8689 			    proc, domid, IDN_GET_MASTERID());
8690 			serrno = EPROTO;
8691 		}
8692 		IDN_GUNLOCK();
8693 
8694 		if (!serrno &&
8695 		    !VALID_NWROFFSET(slab_offset, IDN_SMR_BUFSIZE)) {
8696 			PR_PROTO("%s: slab offset (0x%x) out of range "
8697 			    "(0-0x%lx)\n",
8698 			    proc, slab_offset, MB2B(IDN_NWR_SIZE));
8699 			serrno = EPROTO;
8700 		} else if (!serrno) {
8701 			sp = GETSTRUCT(smr_slab_t, 1);
8702 			sp->sl_start = IDN_OFFSET2ADDR(slab_offset);
8703 			sp->sl_end   = sp->sl_start + slab_size;
8704 			smr_alloc_buflist(sp);
8705 		}
8706 	}
8707 
8708 	/*
8709 	 * Always "put" slabs back to yourself since you're a slave.
8710 	 * Note that we set the forceflag so that even if there are
8711 	 * no waiters we still install the slab for the domain.
8712 	 */
8713 	if (!serrno) {
8714 		DSLAB_LOCK_EXCL(idn.localid);
8715 	}
8716 	rv = smr_slaballoc_put(idn.localid, sp, 1, serrno);
8717 	if (!serrno) {
8718 		DSLAB_UNLOCK(idn.localid);
8719 	}
8720 
8721 	if (rv < 0) {
8722 		/*
8723 		 * Some kind of error trying to install response.
8724 		 * If there was a valid slab sent to us, we'll
8725 		 * just have to send it back.
8726 		 */
8727 		PR_PROTO("%s: failed to install response in waiting area\n",
8728 		    proc);
8729 		if (slab_size != 0) {
8730 			PR_PROTO("%s: sending slab back to domain %d "
8731 			    "(master = %d)\n",
8732 			    proc, domid, IDN_GET_MASTERID());
8733 			idn_send_cmd(domid, IDNCMD_SLABFREE, slab_offset,
8734 			    slab_size, 0);
8735 		}
8736 		if (sp) {
8737 			smr_free_buflist(sp);
8738 			FREESTRUCT(sp, smr_slab_t, 1);
8739 		}
8740 	}
8741 }
8742 
8743 /*
8744  * Note that slab reaping is effectively performed asynchronously
8745  * since the request will be received a protocol server.
8746  */
8747 static void
idn_recv_slabreap_req(int domid,idn_msgtype_t * mtp,int nslabs)8748 idn_recv_slabreap_req(int domid, idn_msgtype_t *mtp, int nslabs)
8749 {
8750 	procname_t	proc = "idn_recv_slabreap_req";
8751 
8752 	PR_PROTO("%s: slab reap request (nslabs = %d)\n", proc, nslabs);
8753 
8754 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8755 
8756 	IDN_GLOCK_SHARED();
8757 	if (domid != IDN_GET_MASTERID()) {
8758 		/*
8759 		 * Only the master can request that slabs be reaped.
8760 		 */
8761 		IDN_GUNLOCK();
8762 		PR_PROTO("%s: only master can request slab reaping\n", proc);
8763 
8764 		idn_send_cmdresp(domid, mtp, IDNCMD_SLABREAP, 0, 0, EACCES);
8765 
8766 		return;
8767 	}
8768 	IDN_GUNLOCK();
8769 
8770 	if (nslabs != 0) {
8771 		IDN_DUNLOCK(domid);
8772 		smr_slab_reap(idn.localid, &nslabs);
8773 		IDN_DLOCK_SHARED(domid);
8774 	}
8775 
8776 	PR_PROTO("%s: slab reap result (nslabs = %d)\n", proc, nslabs);
8777 
8778 	/*
8779 	 * Go ahead and send the reap response back before we start
8780 	 * free'ing off the individual slabs.
8781 	 */
8782 	idn_send_slabreap_resp(domid, mtp, nslabs, 0);
8783 }
8784 
8785 static void
idn_recv_slabreap_resp(int domid,int nslabs,int serrno)8786 idn_recv_slabreap_resp(int domid, int nslabs, int serrno)
8787 {
8788 	procname_t	proc = "idn_recv_slabreap_resp";
8789 
8790 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8791 
8792 	if ((idn.localid != IDN_GET_MASTERID()) || (idn.localid == domid)) {
8793 		PR_PROTO("%s: unexpected slabreap resp received "
8794 		    "(domid = %d)\n", proc, domid);
8795 		ASSERT(0);
8796 		return;
8797 	}
8798 	PR_PROTO("%s: recvd reap response from domain %d for %d slabs "
8799 	    "[serrno = %d]\n", proc, domid, nslabs, serrno);
8800 }
8801 
8802 /*
8803  * Not really necessary to send slabreap response.
8804  * XXX - perhaps useful to master for accounting or
8805  *	 throttling of further reaping?
8806  */
8807 static void
idn_send_slabreap_resp(int domid,idn_msgtype_t * mtp,int nslabs,int serrno)8808 idn_send_slabreap_resp(int domid, idn_msgtype_t *mtp, int nslabs, int serrno)
8809 {
8810 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABREAP, nslabs, 0, serrno);
8811 }
8812 
8813 /*
8814  * Slave -> Master ONLY
8815  * Master never sends slabfree request to itself.
8816  */
8817 static void
idn_recv_slabfree_req(int domid,idn_msgtype_t * mtp,smr_offset_t slab_offset,uint_t slab_size)8818 idn_recv_slabfree_req(int domid, idn_msgtype_t *mtp, smr_offset_t slab_offset,
8819     uint_t slab_size)
8820 {
8821 	smr_slab_t	*sp;
8822 	int		serrno;
8823 	caddr_t		s_start, s_end;
8824 	procname_t	proc = "idn_recv_slabfree_req";
8825 
8826 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8827 
8828 	if (domid == IDN_GET_MASTERID()) {
8829 		PR_PROTO("%s: unexpected slabfree req received (domid = %d)\n",
8830 		    proc, domid);
8831 		idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size,
8832 		    EACCES);
8833 		return;
8834 	}
8835 	if (slab_size > IDN_SLAB_SIZE) {
8836 		PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
8837 		    proc, IDN_SLAB_SIZE, slab_size);
8838 		idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size,
8839 		    EINVAL);
8840 		return;
8841 	}
8842 	s_start = IDN_OFFSET2ADDR(slab_offset);
8843 	s_end   = s_start + slab_size;
8844 	/*
8845 	 * Master has received a SLABFREE request (effectively a response
8846 	 * to some earlier SLABREAP request.
8847 	 * Find the slab associated with this slab and free it up.
8848 	 */
8849 	DSLAB_LOCK_EXCL(domid);
8850 	if ((sp = smr_slaballoc_get(domid, s_start, s_end)) != NULL) {
8851 		smr_slab_free(domid, sp);
8852 		serrno = 0;
8853 	} else {
8854 		serrno = EINVAL;
8855 	}
8856 	DSLAB_UNLOCK(domid);
8857 
8858 	idn_send_slabfree_resp(domid, mtp, slab_offset, slab_size, serrno);
8859 }
8860 
8861 /*
8862  * Master -> Slave ONLY
8863  */
8864 static void
idn_recv_slabfree_resp(int domid,uint_t slab_offset,uint_t slab_size,int serrno)8865 idn_recv_slabfree_resp(int domid, uint_t slab_offset, uint_t slab_size, int
8866     serrno)
8867 {
8868 	procname_t	proc = "idn_recv_slabfree_resp";
8869 
8870 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
8871 
8872 	if (domid != IDN_GET_MASTERID()) {
8873 		PR_PROTO("%s: unexpected slabfree resp received (domid = %d)\n",
8874 		    proc, domid);
8875 		ASSERT(0);
8876 		return;
8877 	}
8878 	if (slab_size > IDN_SLAB_SIZE) {
8879 		PR_PROTO("%s: unexpected slab size. exp %d, recvd %d\n",
8880 		    proc, IDN_SLAB_SIZE, slab_size);
8881 		ASSERT(0);
8882 		return;
8883 	}
8884 	PR_PROTO("%s: recvd free resp from dom %d "
8885 	    "- slab (off/size) 0x%x/0x%x [serrno = %d]\n",
8886 	    proc, domid, slab_offset, slab_size, serrno);
8887 }
8888 
8889 static void
idn_send_slabfree_resp(int domid,idn_msgtype_t * mtp,uint_t slab_offset,uint_t slab_size,int serrno)8890 idn_send_slabfree_resp(int domid, idn_msgtype_t *mtp, uint_t slab_offset,
8891     uint_t slab_size, int serrno)
8892 {
8893 	idn_send_cmdresp(domid, mtp, IDNCMD_SLABFREE, slab_offset, slab_size,
8894 	    serrno);
8895 }
8896 
8897 static void
idn_retry_nodename_req(void * arg)8898 idn_retry_nodename_req(void *arg)
8899 {
8900 	int	domid = (int)(uintptr_t)arg;
8901 
8902 	idn_send_nodename_req(domid);
8903 }
8904 
8905 static void
idn_send_nodename_req(int domid)8906 idn_send_nodename_req(int domid)
8907 {
8908 	caddr_t		b_bufp;
8909 	smr_offset_t	bufoffset;
8910 	int		serrno;
8911 	idn_domain_t	*dp = &idn_domain[domid];
8912 	procname_t	proc = "idn_send_nodename_req";
8913 
8914 	/*
8915 	 * Need to drop domain lock across
8916 	 * SMR allocation.
8917 	 */
8918 	serrno = smr_buf_alloc(domid, MAXDNAME+1, &b_bufp);
8919 
8920 	IDN_DLOCK_SHARED(domid);
8921 	if (dp->dstate != IDNDS_CONNECTED) {
8922 		/*
8923 		 * Lost connection.
8924 		 */
8925 		PR_PROTO("%s:%d: connection lost [dstate = %s]\n",
8926 		    proc, domid, idnds_str[dp->dstate]);
8927 		IDN_DUNLOCK(domid);
8928 		if (!serrno)
8929 			(void) smr_buf_free(domid, b_bufp, MAXDNAME+1);
8930 		return;
8931 	}
8932 	if (serrno) {
8933 		/*
8934 		 * Failed to allocate buffer, but still have
8935 		 * connection so keep trying.  We may have queried
8936 		 * the master a little too earlier.
8937 		 */
8938 		PR_PROTO("%s:%d: buffer alloc failed [dstate = %s]\n",
8939 		    proc, domid, idnds_str[dp->dstate]);
8940 		(void) timeout(idn_retry_nodename_req, (void *)(uintptr_t)domid,
8941 		    hz);
8942 		IDN_DUNLOCK(domid);
8943 		return;
8944 	}
8945 
8946 	*b_bufp = (char)MAXDNAME;
8947 	bufoffset = IDN_ADDR2OFFSET(b_bufp);
8948 
8949 	idn_send_cmd(domid, IDNCMD_NODENAME, bufoffset, 0, 0);
8950 	IDN_DUNLOCK(domid);
8951 }
8952 
8953 static void
idn_send_nodename_resp(int domid,idn_msgtype_t * mtp,smr_offset_t bufoffset,int serrno)8954 idn_send_nodename_resp(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset,
8955     int serrno)
8956 {
8957 	idn_send_cmdresp(domid, mtp, IDNCMD_NODENAME, (uint_t)bufoffset, 0,
8958 	    serrno);
8959 }
8960 
8961 static void
idn_recv_nodename_req(int domid,idn_msgtype_t * mtp,smr_offset_t bufoffset)8962 idn_recv_nodename_req(int domid, idn_msgtype_t *mtp, smr_offset_t bufoffset)
8963 {
8964 	caddr_t		b_bufp;
8965 	int		length;
8966 	idn_domain_t	*ldp = &idn_domain[idn.localid];
8967 	procname_t	proc = "idn_recv_nodename_req";
8968 
8969 	IDN_DLOCK_EXCL(idn.localid);
8970 	if (!strlen(ldp->dname)) {
8971 		if (!strlen(utsname.nodename)) {
8972 			/*
8973 			 * Local domain's nodename hasn't been
8974 			 * set yet.
8975 			 */
8976 			IDN_DUNLOCK(idn.localid);
8977 			idn_send_cmd_nackresp(domid, mtp, IDNCMD_NODENAME,
8978 			    IDNNACK_RETRY);
8979 			return;
8980 		}
8981 		(void) strncpy(ldp->dname, utsname.nodename, MAXDNAME - 1);
8982 	}
8983 	IDN_DLOCK_DOWNGRADE(idn.localid);
8984 
8985 	if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
8986 		PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
8987 		    proc, domid, bufoffset);
8988 		IDN_DUNLOCK(idn.localid);
8989 		idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
8990 		return;
8991 	}
8992 
8993 	b_bufp = IDN_OFFSET2ADDR(bufoffset);
8994 	length = (int)(*b_bufp++ & 0xff);
8995 
8996 	if (length < strlen(ldp->dname)) {
8997 		PR_PROTO("%s:%d: buffer not big enough (req %lu, got %d)\n",
8998 		    proc, domid, strlen(ldp->dname), length);
8999 		IDN_DUNLOCK(idn.localid);
9000 		idn_send_nodename_resp(domid, mtp, bufoffset, EINVAL);
9001 		return;
9002 	}
9003 
9004 	(void) strncpy(b_bufp, ldp->dname, MAXDNAME);
9005 	b_bufp[MAXDNAME-1] = 0;
9006 	IDN_DUNLOCK(idn.localid);
9007 
9008 	idn_send_nodename_resp(domid, mtp, bufoffset, 0);
9009 }
9010 
9011 static void
idn_recv_nodename_resp(int domid,smr_offset_t bufoffset,int serrno)9012 idn_recv_nodename_resp(int domid, smr_offset_t bufoffset, int serrno)
9013 {
9014 	caddr_t		b_bufp;
9015 	idn_domain_t	*dp = &idn_domain[domid];
9016 	procname_t	proc = "idn_recv_nodename_resp";
9017 
9018 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
9019 
9020 	if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
9021 		PR_PROTO("%s:%d: invalid SMR offset received (0x%x)\n",
9022 		    proc, domid, bufoffset);
9023 		return;
9024 	}
9025 
9026 	if (serrno == 0) {
9027 		b_bufp = IDN_OFFSET2ADDR(bufoffset) + 1;
9028 		b_bufp[MAXDNAME-1] = 0;
9029 
9030 		if (strlen(b_bufp) > 0) {
9031 			(void) strncpy(dp->dname, b_bufp, MAXDNAME);
9032 			PR_PROTO("%s:%d: received nodename(%s)\n",
9033 			    proc, domid, dp->dname);
9034 		}
9035 	}
9036 
9037 	(void) smr_buf_free(domid, b_bufp - 1, MAXDNAME + 1);
9038 }
9039 
9040 /*
9041  * The master allocations the SMR management structures.
9042  */
9043 static int
idn_master_init()9044 idn_master_init()
9045 {
9046 	idn_domain_t	*ldp = &idn_domain[idn.localid];
9047 	size_t		reserved_size = 0;
9048 	caddr_t		reserved_area = NULL;
9049 	procname_t	proc = "idn_master_init";
9050 
9051 	ASSERT(IDN_GLOCK_IS_EXCL());
9052 	ASSERT(IDN_DLOCK_IS_EXCL(idn.localid));
9053 
9054 	if (idn.mboxarea != NULL) {
9055 		PR_PROTO("%s: master data already initialized\n", proc);
9056 		return (0);
9057 	}
9058 
9059 	PR_PROTO("%s: initializing master data (domid = %d)\n",
9060 	    proc, idn.localid);
9061 
9062 	/*
9063 	 * Reserve an area of the SMR for mailbox usage.
9064 	 * This area is allocated to other domains via
9065 	 * the master.  Round it up to IDN_SMR_BUFSIZE multiple.
9066 	 */
9067 	reserved_size = IDNROUNDUP(IDN_MBOXAREA_SIZE, IDN_SMR_BUFSIZE);
9068 
9069 	PR_PROTO("%s: reserving %lu bytes for mailbox area\n",
9070 	    proc, reserved_size);
9071 
9072 #ifdef DEBUG
9073 	if (reserved_size > (size_t)IDN_SLAB_SIZE) {
9074 		PR_PROTO("%s: WARNING mbox area (%ld) > slab size (%d)\n",
9075 		    proc, reserved_size, IDN_SLAB_SIZE);
9076 	}
9077 #endif /* DEBUG */
9078 	/*
9079 	 * Initialize the pool of slabs and SMR I/O buffers.
9080 	 */
9081 	if (smr_slabpool_init(reserved_size, &reserved_area) != 0) {
9082 		idn_master_deinit();
9083 		return (-1);
9084 	}
9085 
9086 	ASSERT(idn.mboxarea == NULL);
9087 	ASSERT(reserved_area);
9088 
9089 	bzero(reserved_area, reserved_size);
9090 
9091 	idn.mboxarea = (idn_mboxtbl_t *)reserved_area;
9092 	ldp->dmbox.m_tbl = IDN_MBOXAREA_BASE(idn.mboxarea, idn.localid);
9093 	/*
9094 	 * Initialize the SMR pointers in the entire
9095 	 * mailbox table.
9096 	 */
9097 	idn_mboxarea_init(idn.mboxarea, IDN_MBOXAREA_SIZE / IDN_MBOXTBL_SIZE);
9098 
9099 	return (0);
9100 }
9101 
9102 static void
idn_master_deinit()9103 idn_master_deinit()
9104 {
9105 	idn_domain_t	*ldp;
9106 	smr_slab_t	*sp;
9107 	procname_t	proc = "idn_master_deinit";
9108 
9109 	ASSERT(IDN_GLOCK_IS_EXCL());
9110 	ASSERT(IDN_DLOCK_IS_EXCL(idn.localid));
9111 
9112 	if (idn.mboxarea == NULL) {
9113 		PR_PROTO("%s: master data already deinitialized\n", proc);
9114 		return;
9115 	}
9116 
9117 	ldp = &idn_domain[idn.localid];
9118 
9119 	PR_PROTO("%s: deinitializing master data (domid = %d)\n",
9120 	    proc, idn.localid);
9121 
9122 	ldp->dmbox.m_tbl = NULL;
9123 	idn.mboxarea = NULL;
9124 	/*
9125 	 * Master may still be holding onto slabs of his own.
9126 	 */
9127 	DSLAB_LOCK_EXCL(idn.localid);
9128 	sp = ldp->dslab;
9129 	ldp->dslab = NULL;
9130 	ldp->dnslabs = 0;
9131 	if (sp)
9132 		smr_slab_free(idn.localid, sp);
9133 	ldp->dslab_state = DSLAB_STATE_UNKNOWN;
9134 	DSLAB_UNLOCK(idn.localid);
9135 
9136 	smr_slabpool_deinit();
9137 }
9138 
9139 static int
idn_mark_awol(int domid,clock_t * atime)9140 idn_mark_awol(int domid, clock_t *atime)
9141 {
9142 	clock_t		awol;
9143 	idn_domain_t	*dp = &idn_domain[domid];
9144 
9145 	ASSERT(IDN_SYNC_IS_LOCKED());
9146 	ASSERT(IDN_GLOCK_IS_EXCL());
9147 
9148 	if (!DOMAIN_IN_SET(idn.domset.ds_awol, domid)) {
9149 		DOMAINSET_ADD(idn.domset.ds_awol, domid);
9150 		idn.nawols++;
9151 	}
9152 	awol = ddi_get_lbolt();
9153 	if (dp->dawol.a_count++ == 0)
9154 		dp->dawol.a_time = awol;
9155 	dp->dawol.a_last = awol;
9156 	if ((awol - dp->dawol.a_msg) >= (clock_t)(idn_awolmsg_interval * hz))
9157 		dp->dawol.a_msg = awol;
9158 	else
9159 		awol = 0;
9160 
9161 	*atime = awol;
9162 
9163 	idn_awol_event_set(dp->dhw.dh_boardset);
9164 
9165 	return (dp->dawol.a_count);
9166 }
9167 
9168 void
idn_clear_awol(int domid)9169 idn_clear_awol(int domid)
9170 {
9171 	idn_domain_t	*dp = &idn_domain[domid];
9172 
9173 	ASSERT(IDN_SYNC_IS_LOCKED());
9174 	ASSERT(IDN_GLOCK_IS_EXCL());
9175 	if (DOMAIN_IN_SET(idn.domset.ds_awol, domid)) {
9176 		DOMAINSET_DEL(idn.domset.ds_awol, domid);
9177 		idn.nawols--;
9178 	}
9179 	if (dp->dawol.a_count > 0) {
9180 		dp->dawol.a_count = 0;
9181 		dp->dawol.a_last = dp->dawol.a_time;
9182 		dp->dawol.a_time = 0;
9183 		dp->dawol.a_msg = 0;
9184 
9185 		idn_awol_event_clear(dp->dhw.dh_boardset);
9186 	}
9187 }
9188 
9189 /*
9190  * A timer expired.
9191  */
9192 void
idn_timer_expired(void * arg)9193 idn_timer_expired(void *arg)
9194 {
9195 	idn_domain_t	*dp;
9196 	char		*op = "UNKNOWN";
9197 	clock_t		awol = 0;
9198 	int		awolcount, dcpu, domid;
9199 	idn_timer_t	*tp = (idn_timer_t *)arg;
9200 	idn_timerq_t	*tq = NULL;
9201 	uint_t		token;
9202 	char		dname[MAXDNAME];
9203 	procname_t	proc = "idn_timer_expired";
9204 	STRING(str);
9205 
9206 	tq = tp->t_q;
9207 
9208 	ASSERT(tp->t_domid != IDN_NIL_DOMID);
9209 
9210 	IDN_TIMERQ_LOCK(tq);
9211 
9212 	INUM2STR(tp->t_type, str);
9213 
9214 	if (tp->t_onq == 0) {
9215 		PR_TIMER("%s: timer CAUGHT TERMINATION (type = %s)\n",
9216 		    proc, str);
9217 		/*
9218 		 * Timer was dequeued.  Somebody is trying
9219 		 * to shut it down.
9220 		 */
9221 		IDN_TIMERQ_UNLOCK(tq);
9222 		return;
9223 	}
9224 
9225 	IDN_TIMER_DEQUEUE(tq, tp);
9226 
9227 	IDN_TIMERQ_UNLOCK(tq);
9228 
9229 	IDN_SYNC_LOCK();
9230 	IDN_DLOCK_EXCL(tp->t_domid);
9231 
9232 	domid = tp->t_domid;
9233 
9234 	dp = &idn_domain[domid];
9235 	(void) strcpy(dname, dp->dname);
9236 	dcpu = dp->dcpu;
9237 
9238 	IDN_TIMER_EXEC(tp);
9239 
9240 #ifdef DEBUG
9241 	PR_TIMER("%s:%d: [%s] timer EXPIRED (C=0x%x, P=0x%llx, X=0x%llx)\n",
9242 	    proc, tp->t_domid, str, tp->t_cookie,
9243 	    tp->t_posttime, tp->t_exectime);
9244 #endif /* DEBUG */
9245 
9246 	/*
9247 	 * IMPORTANT:
9248 	 * Each case is responsible for dropping SYNC_LOCK & DLOCK.
9249 	 */
9250 	switch (tp->t_type) {
9251 	case IDNP_DATA:
9252 		IDN_SYNC_UNLOCK();
9253 		/*
9254 		 * Timed out waiting for a data packet response.
9255 		 * We can't close domain since he may just be
9256 		 * temporarily AWOL.
9257 		 * Note that dio and diocheck do not get cleared.
9258 		 * This is taken care of when the domain restarts
9259 		 * or is fatally closed.
9260 		 * We only need a reader lock for this.
9261 		 */
9262 		IDN_DLOCK_DOWNGRADE(domid);
9263 		if (dp->diocheck && dp->dmbox.m_send) {
9264 			(void) idn_reclaim_mboxdata(domid, 0, -1);
9265 			if (dp->dio >= IDN_WINDOW_EMAX) {
9266 				idn_msgtype_t	mt;
9267 				/*
9268 				 * Restart timer for another
9269 				 * go around.
9270 				 */
9271 				IDN_MSGTIMER_START(domid, IDNP_DATA, 0,
9272 				    idn_msg_waittime[IDNP_DATA],
9273 				    &mt.mt_cookie);
9274 			} else {
9275 				lock_clear(&dp->diocheck);
9276 			}
9277 		}
9278 		IDN_DUNLOCK(domid);
9279 		break;
9280 
9281 	case IDNP_NEGO:
9282 		/*
9283 		 * If we're not in a NEGO transition, then
9284 		 * just ignore this timeout.
9285 		 */
9286 		if (dp->dxp == &xphase_nego) {
9287 			uint_t		token;
9288 
9289 			IDN_GLOCK_EXCL();
9290 			op = "CONNECT";
9291 			awolcount = idn_mark_awol(domid, &awol);
9292 			IDN_GUNLOCK();
9293 
9294 			idn_nego_cleanup_check(domid, IDN_NIL_DOMID,
9295 			    IDN_NIL_DCPU);
9296 
9297 			IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
9298 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
9299 			idn_retry_submit(idn_retry_nego, NULL, token,
9300 			    idn_msg_retrytime[(int)IDNRETRY_NEGO]);
9301 		}
9302 		IDN_DUNLOCK(domid);
9303 		IDN_SYNC_UNLOCK();
9304 		break;
9305 
9306 	case IDNP_CMD:
9307 		/*
9308 		 * Timeouts on commands typically mean that the
9309 		 * the master is not responding.  Furthermore, we
9310 		 * can't FORCE a FIN disconnect since at this stage
9311 		 * we are CONNECTED and thus other domains may
9312 		 * have cache entries that we're sharing with them.
9313 		 * Only choice is to completely disconnect from
9314 		 * IDN and try to reestablish connection.
9315 		 *
9316 		 * However, timeouts attempting to get nodename
9317 		 * are not fatal.  Although we don't want to retry
9318 		 * either since each timeout is a lost buffer to
9319 		 * the remote domain.
9320 		 */
9321 		if (tp->t_subtype == (ushort_t)IDNCMD_NODENAME) {
9322 			PR_PROTO("%s:%d: timedout waiting for nodename\n",
9323 			    proc, domid);
9324 			IDN_DUNLOCK(domid);
9325 			IDN_SYNC_UNLOCK();
9326 			break;
9327 		}
9328 
9329 		IDN_GLOCK_EXCL();
9330 		if (idn.state == IDNGS_ONLINE) {
9331 			domainset_t	domset;
9332 			int		masterid = IDN_GET_MASTERID();
9333 
9334 			IDN_GKSTAT_GLOBAL_EVENT(gk_reconfigs,
9335 			    gk_reconfig_last);
9336 
9337 			PR_PROTO("%s:%d: RECONFIG trying old masterid = %d\n",
9338 			    proc, domid, masterid);
9339 
9340 			IDN_GSTATE_TRANSITION(IDNGS_RECONFIG);
9341 			IDN_SET_NEW_MASTERID(masterid);
9342 			IDN_GUNLOCK();
9343 			IDN_DUNLOCK(domid);
9344 
9345 			domset = idn.domset.ds_trans_on |
9346 			    idn.domset.ds_connected;
9347 
9348 			idn_unlink_domainset(domset, IDNFIN_NORMAL,
9349 			    IDNFIN_ARG_NONE, IDNFIN_OPT_RELINK,	BOARDSET_ALL);
9350 		} else {
9351 			IDN_GUNLOCK();
9352 			IDN_DUNLOCK(domid);
9353 		}
9354 		IDN_SYNC_UNLOCK();
9355 		break;
9356 
9357 	case IDNP_CON:
9358 		if (tp->t_subtype == (ushort_t)IDNCON_QUERY) {
9359 			/*
9360 			 * Timed out sending a CON-query.  This is
9361 			 * non-fatal.  We simply need to retry.
9362 			 */
9363 			IDN_GLOCK_EXCL();
9364 			op = "CONNECT";
9365 			awolcount = idn_mark_awol(domid, &awol);
9366 			IDN_GUNLOCK();
9367 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_CONQ);
9368 			idn_retry_submit(idn_retry_query, NULL, token,
9369 			    idn_msg_retrytime[(int)IDNRETRY_CONQ]);
9370 			IDN_DUNLOCK(domid);
9371 			IDN_SYNC_UNLOCK();
9372 			break;
9373 		}
9374 		/*FALLTHROUGH*/
9375 	case IDNP_CFG:
9376 		/*
9377 		 * Any timeouts here we simply try to disconnect
9378 		 * and reestablish the link.  Since we haven't
9379 		 * reached the connected state w.r.t. this domain
9380 		 * we put his fin state to FORCE-HARD in order
9381 		 * to shoot right through without involving other
9382 		 * domains.  Recall that other domains may have
9383 		 * established connections with the given domain
9384 		 * which means any FIN queries to them will always
9385 		 * return connected to the given domain.  Since
9386 		 * neither the given domain nor the local domain
9387 		 * plan on disconnecting from the IDN the connection
9388 		 * to the other domains will remain thereby preventing
9389 		 * the local FIN from ever completing.  Recall that
9390 		 * a FIN depends on all member domains FIN'ing also.
9391 		 */
9392 		IDN_GLOCK_EXCL();
9393 		op = "CONNECT";
9394 		awolcount = idn_mark_awol(domid, &awol);
9395 		IDN_GUNLOCK();
9396 		DOMAINSET_ADD(idn.domset.ds_relink, domid);
9397 		IDN_HISTORY_LOG(IDNH_RELINK, domid, dp->dstate,
9398 		    idn.domset.ds_relink);
9399 		(void) idn_disconnect(domid, IDNFIN_FORCE_SOFT,
9400 		    IDNFIN_ARG_NONE, IDNFIN_SYNC_NO);
9401 		IDN_DUNLOCK(domid);
9402 		IDN_SYNC_UNLOCK();
9403 		break;
9404 
9405 	case IDNP_FIN:
9406 		/*
9407 		 * Timeouts here simply try to retry.
9408 		 */
9409 		IDN_GLOCK_EXCL();
9410 		op = "DISCONNECT";
9411 		awolcount = idn_mark_awol(domid, &awol);
9412 		IDN_GUNLOCK();
9413 		if (tp->t_subtype == (ushort_t)IDNFIN_QUERY) {
9414 			int		d;
9415 			domainset_t	rdyset;
9416 			/*
9417 			 * Timed out sending a FIN-query.  This is
9418 			 * non-fatal.  We simply need to retry.
9419 			 * If we were doing a forced unlink of any
9420 			 * domains, we don't want this awol guy
9421 			 * to hold us up.  Looks for any forced
9422 			 * unlinks and make them "ready" with
9423 			 * respect to this awol domain.
9424 			 */
9425 			rdyset = 0;
9426 			for (d = 0; d < MAX_DOMAINS; d++) {
9427 				if (FIN_IS_FORCE(idn_domain[d].dfin)) {
9428 					DOMAINSET_ADD(rdyset, d);
9429 				}
9430 			}
9431 			if (rdyset)
9432 				(void) idn_sync_register(domid,
9433 				    IDNSYNC_DISCONNECT,
9434 				    rdyset, IDNSYNC_REG_REG);
9435 
9436 			token = IDN_RETRY_TOKEN(domid, IDNRETRY_FINQ);
9437 			idn_retry_submit(idn_retry_query, NULL, token,
9438 			    idn_msg_retrytime[(int)IDNRETRY_FINQ]);
9439 			IDN_DUNLOCK(domid);
9440 			IDN_SYNC_UNLOCK();
9441 			break;
9442 		}
9443 
9444 		if (dp->dfin == IDNFIN_FORCE_SOFT) {
9445 			IDN_FSTATE_TRANSITION(dp, IDNFIN_FORCE_HARD);
9446 		}
9447 		/*
9448 		 * Anybody that was waiting on this domain and
9449 		 * had a hard-force in action gets this guy for
9450 		 * free in their base ready-set.
9451 		 */
9452 		idn_sync_register_awol(domid);
9453 
9454 		dp->dxp = &xphase_fin;
9455 		IDN_XSTATE_TRANSITION(dp, IDNXS_PEND);
9456 		token = IDN_RETRY_TOKEN(domid, IDNRETRY_FIN);
9457 		idn_retry_submit(idn_retry_fin, NULL, token,
9458 		    idn_msg_retrytime[(int)IDNRETRY_FIN]);
9459 		IDN_DUNLOCK(domid);
9460 		IDN_SYNC_UNLOCK();
9461 		break;
9462 
9463 	default:
9464 
9465 		ASSERT(0);
9466 		IDN_DUNLOCK(domid);
9467 		IDN_SYNC_UNLOCK();
9468 		break;
9469 	}
9470 
9471 	IDN_TIMER_FREE(tp);
9472 
9473 	if (awol) {
9474 		if (strlen(dname) > 0) {
9475 			cmn_err(CE_WARN,
9476 			    "IDN: 236: domain (%s) [ID %d] not "
9477 			    "responding to %s [#%d]",
9478 			    dname, domid, op, awolcount);
9479 		} else {
9480 			cmn_err(CE_WARN,
9481 			    "IDN: 236: domain [ID %d, CPU %d] not "
9482 			    "responding to %s [#%d]",
9483 			    domid, dcpu, op, awolcount);
9484 		}
9485 	}
9486 }
9487 
9488 #if 0
9489 static int
9490 idn_retry_check(uint_t token)
9491 {
9492 	int			i, count = 0;
9493 	int			domid = IDN_RETRY_TOKEN2DOMID(token);
9494 	int			key = IDN_RETRY_TOKEN2TYPE(token);
9495 	idn_retry_job_t		*rp;
9496 	idn_retry_queue_t	*qp;
9497 
9498 	qp = &idn.retryqueue;
9499 
9500 	mutex_enter(&qp->rq_mutex);
9501 
9502 	for (i = 0, rp = qp->rq_jobs; i < qp->rq_count; i++, rp = rp->rj_next)
9503 		if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
9504 		    ((key == IDN_RETRY_TYPEALL) || (rp->rj_token == token)))
9505 			count++;
9506 
9507 	mutex_exit(&qp->rq_mutex);
9508 
9509 	return (count);
9510 }
9511 #endif /* 0 */
9512 
9513 static void
idn_retry_execute(void * arg)9514 idn_retry_execute(void *arg)
9515 {
9516 	idn_retry_job_t		*rp = (idn_retry_job_t *)arg;
9517 	idn_retry_queue_t	*qp;
9518 
9519 	qp = &idn.retryqueue;
9520 
9521 	mutex_enter(&qp->rq_mutex);
9522 	if (rp->rj_onq == 0) {
9523 		/*
9524 		 * Job has already been claimed by
9525 		 * retry termination routine.
9526 		 * Bail out.
9527 		 */
9528 		mutex_exit(&qp->rq_mutex);
9529 		return;
9530 	}
9531 	rp->rj_next->rj_prev = rp->rj_prev;
9532 	rp->rj_prev->rj_next = rp->rj_next;
9533 	if (--(qp->rq_count) == 0)
9534 		qp->rq_jobs = NULL;
9535 	else if (qp->rq_jobs == rp)
9536 		qp->rq_jobs = rp->rj_next;
9537 	mutex_exit(&qp->rq_mutex);
9538 
9539 	(*rp->rj_func)(rp->rj_token, rp->rj_arg);
9540 
9541 	IDNRETRY_FREEJOB(rp);
9542 }
9543 
9544 /*
9545  *
9546  */
9547 static void
idn_retry_submit(void (* func)(uint_t token,void * arg),void * arg,uint_t token,clock_t ticks)9548 idn_retry_submit(void (*func)(uint_t token, void *arg), void *arg, uint_t token,
9549     clock_t ticks)
9550 {
9551 	idn_retry_job_t		*rp, *cp;
9552 	idn_retry_queue_t	*qp;
9553 	int			c;
9554 	procname_t		proc = "idn_retry_submit";
9555 
9556 	if (ticks < 0) {
9557 		PR_PROTO("%s: (token = 0x%x) WARNING ticks = %ld\n",
9558 		    proc, token, ticks);
9559 		return;
9560 	}
9561 	if (ticks == 0)		/* At least one tick to get into background */
9562 		ticks++;
9563 
9564 	PR_PROTO("%s: token = 0x%x\n", proc, token);
9565 
9566 	qp = &idn.retryqueue;
9567 
9568 	mutex_enter(&qp->rq_mutex);
9569 	for (c = 0, cp = qp->rq_jobs; c < qp->rq_count; cp = cp->rj_next, c++) {
9570 		if (cp->rj_token == token) {
9571 			PR_PROTO("%s: token = (%d,0x%x) already present\n",
9572 			    proc, IDN_RETRY_TOKEN2DOMID(token),
9573 			    IDN_RETRY_TOKEN2TYPE(token));
9574 			break;
9575 		}
9576 	}
9577 
9578 	if (c < qp->rq_count) {
9579 		mutex_exit(&qp->rq_mutex);
9580 		return;
9581 	}
9582 
9583 	rp = IDNRETRY_ALLOCJOB();
9584 	rp->rj_func = func;
9585 	rp->rj_arg = arg;
9586 	rp->rj_token = token;
9587 	rp->rj_prev = rp->rj_next = rp;
9588 
9589 	if (qp->rq_jobs == NULL) {
9590 		qp->rq_jobs = rp;
9591 	} else {
9592 		rp->rj_next = qp->rq_jobs;
9593 		rp->rj_prev = qp->rq_jobs->rj_prev;
9594 		rp->rj_next->rj_prev = rp;
9595 		rp->rj_prev->rj_next = rp;
9596 	}
9597 	rp->rj_onq = 1;
9598 	qp->rq_count++;
9599 	rp->rj_id = timeout(idn_retry_execute, (caddr_t)rp, ticks);
9600 	mutex_exit(&qp->rq_mutex);
9601 }
9602 
9603 int
idn_retry_terminate(uint_t token)9604 idn_retry_terminate(uint_t token)
9605 {
9606 	int			i, domid;
9607 	uint_t			key, count;
9608 	idn_retry_job_t		*rp, *nrp, *fp;
9609 	idn_retry_queue_t	*qp;
9610 	procname_t		proc = "idn_retry_terminate";
9611 
9612 	key = IDN_RETRY_TOKEN2TYPE(token);
9613 	domid = IDN_RETRY_TOKEN2DOMID(token);
9614 	fp = NULL;
9615 	qp = &idn.retryqueue;
9616 
9617 	mutex_enter(&qp->rq_mutex);
9618 	for (i = count = 0, rp = qp->rq_jobs; i < qp->rq_count; i++) {
9619 		nrp = rp->rj_next;
9620 		if ((domid == IDN_RETRY_TOKEN2DOMID(rp->rj_token)) &&
9621 		    ((key == IDN_RETRY_TYPEALL) ||
9622 		    (rp->rj_token == token))) {
9623 			/*
9624 			 * Turn off onq field as a signal to
9625 			 * the execution routine that this
9626 			 * retry has been terminated.  This
9627 			 * is necessary since we can't untimeout
9628 			 * while holding the rq_mutex otherwise
9629 			 * we'll deadlock with the execution
9630 			 * routine.  We'll untimeout these guys
9631 			 * _after_ we drop rq_mutex.
9632 			 */
9633 			rp->rj_onq = 0;
9634 			rp->rj_next->rj_prev = rp->rj_prev;
9635 			rp->rj_prev->rj_next = rp->rj_next;
9636 			if (qp->rq_jobs == rp)
9637 				qp->rq_jobs = rp->rj_next;
9638 			rp->rj_next = fp;
9639 			fp = rp;
9640 			count++;
9641 		}
9642 		rp = nrp;
9643 	}
9644 
9645 	if ((qp->rq_count -= count) == 0)
9646 		qp->rq_jobs = NULL;
9647 
9648 	mutex_exit(&qp->rq_mutex);
9649 
9650 	PR_PROTO("%s: token = (%d,0x%x), dequeued = %d\n",
9651 	    proc, domid, key, count);
9652 
9653 	for (; fp; fp = nrp) {
9654 		(void) untimeout(fp->rj_id);
9655 
9656 		nrp = fp->rj_next;
9657 		IDNRETRY_FREEJOB(fp);
9658 	}
9659 
9660 	return (count);
9661 }
9662 
9663 /*
9664  * -----------------------------------------------------------------------
9665  * The sole purpose of the idn_protocol_server is to manage the IDN
9666  * protocols between the various domains.  These messages do _not_ go
9667  * through the regular streams queues since they are not dependent on
9668  * any user process or module necessarily having the IDN driver open.
9669  * There may be multiple instances of these servers to enhance performance
9670  * of domain management.  Each server is assigned a idn_protoqueue_t
9671  * from which to obtain the work they need to do.
9672  * -----------------------------------------------------------------------
9673  */
9674 int
idn_protocol_init(int nservers)9675 idn_protocol_init(int nservers)
9676 {
9677 	int		i;
9678 	idn_protojob_t	*jp;
9679 	register idn_protoqueue_t	*protoq;
9680 
9681 	if (nservers <= 0) {
9682 		cmn_err(CE_WARN,
9683 		    "IDN: 237: invalid number (%d) of protocol servers",
9684 		    nservers);
9685 		return (-1);
9686 	}
9687 
9688 	idn.protocol.p_jobpool = kmem_cache_create("idn_protocol_jobcache",
9689 	    sizeof (idn_protojob_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
9690 	if (idn.protocol.p_jobpool == NULL) {
9691 		cmn_err(CE_WARN,
9692 		    "IDN: 238: kmem_cache_create(jobcache) failed");
9693 		return (-1);
9694 	}
9695 
9696 	/*
9697 	 * Initialize static cache for protojob.
9698 	 */
9699 	mutex_init(&idn_protojob_cache_lock, NULL, MUTEX_DRIVER, NULL);
9700 	jp = &idn_protojob_cache[0];
9701 	for (i = 1; i < IDN_DMV_PENDING_MAX; jp = jp->j_next, i++) {
9702 		jp->j_cache = 1;
9703 		jp->j_next = &idn_protojob_cache[i];
9704 	}
9705 	jp->j_cache = 1;
9706 	jp->j_next = NULL;
9707 	idn_protojob_cache_list = &idn_protojob_cache[0];
9708 
9709 	/*
9710 	 * Init morgue semaphore.
9711 	 */
9712 	sema_init(&idn.protocol.p_morgue, 0, NULL, SEMA_DEFAULT, NULL);
9713 	/*
9714 	 * Alloc server queues.
9715 	 */
9716 	idn.protocol.p_serverq = GETSTRUCT(idn_protoqueue_t, nservers);
9717 
9718 	/*
9719 	 * Init server queues.
9720 	 */
9721 	protoq = idn.protocol.p_serverq;
9722 	for (i = 0; i < nservers; protoq++, i++) {
9723 		mutex_init(&protoq->q_mutex, NULL, MUTEX_DRIVER, NULL);
9724 		cv_init(&protoq->q_cv, NULL, CV_DEFAULT, NULL);
9725 		protoq->q_id	  = i;
9726 		protoq->q_joblist = NULL;
9727 		protoq->q_joblist_tail = NULL;
9728 		protoq->q_die	  = 0;
9729 		protoq->q_morgue  = &idn.protocol.p_morgue;
9730 		/*
9731 		 * Create protocol server thread.
9732 		 */
9733 		protoq->q_threadp = thread_create(NULL, 0,
9734 		    idn_protocol_server, (caddr_t)&i, sizeof (i), &p0,
9735 		    TS_RUN, maxclsyspri);
9736 	}
9737 	/*
9738 	 * The servers are kept in the p_server[] array, however
9739 	 * we'll build a linked list of them to facilitate debugging.
9740 	 */
9741 	protoq = idn.protocol.p_serverq;
9742 	for (i = 0; i < (nservers - 1); protoq++, i++)
9743 		protoq->q_next = (protoq + 1);
9744 	protoq->q_next = NULL;
9745 
9746 	idn.nservers = nservers;
9747 
9748 	return (idn.nservers);
9749 }
9750 
9751 void
idn_protocol_deinit()9752 idn_protocol_deinit()
9753 {
9754 	register int	i;
9755 	int		nservers;
9756 	register idn_protoqueue_t	*protoq;
9757 
9758 	nservers = idn.nservers;
9759 
9760 	if (nservers <= 0)
9761 		return;
9762 
9763 	/*
9764 	 * Make sure the servers are dead.
9765 	 */
9766 	idn_protocol_server_killall();
9767 	ASSERT(idn.nservers == 0);
9768 	/*
9769 	 * Destroy the mutexes.
9770 	 */
9771 	protoq = idn.protocol.p_serverq;
9772 	for (i = 0; i < nservers; protoq++, i++) {
9773 		mutex_destroy(&protoq->q_mutex);
9774 		cv_destroy(&protoq->q_cv);
9775 	}
9776 	/*
9777 	 * Free up the protoqueue memory.
9778 	 */
9779 	FREESTRUCT(idn.protocol.p_serverq, idn_protoqueue_t, nservers);
9780 	idn.protocol.p_serverq = NULL;
9781 	/*
9782 	 * Destroy the morgue semaphore.
9783 	 */
9784 	sema_destroy(&idn.protocol.p_morgue);
9785 
9786 	if (idn.protocol.p_jobpool) {
9787 		kmem_cache_destroy(idn.protocol.p_jobpool);
9788 		idn.protocol.p_jobpool = NULL;
9789 	}
9790 }
9791 
9792 static void
idn_protocol_server(int * id)9793 idn_protocol_server(int *id)
9794 {
9795 	idn_protoqueue_t	*pq;
9796 	idn_protojob_t		*jl;
9797 	register idn_protojob_t	*jp;
9798 	procname_t		proc = "idn_protocol_server";
9799 
9800 	if (id == NULL) {
9801 		PR_PROTO("%s: id == NULL, thread exiting\n", proc);
9802 		return;
9803 	}
9804 	ASSERT((*id >= 0) && (*id < idn_protocol_nservers));
9805 
9806 	pq = &idn.protocol.p_serverq[*id];
9807 
9808 	ASSERT(pq->q_id == *id);
9809 
9810 	PR_PROTO("%s: id %d starting up (pq = 0x%p)\n",
9811 	    proc, pq->q_id, (void *)pq);
9812 
9813 	/*CONSTCOND*/
9814 	while (1) {
9815 		mutex_enter(&pq->q_mutex);
9816 
9817 		while (((jl = pq->q_joblist) == NULL) && !pq->q_die)
9818 			cv_wait(&pq->q_cv, &pq->q_mutex);
9819 
9820 		pq->q_joblist = pq->q_joblist_tail = NULL;
9821 
9822 		if (pq->q_die) {
9823 			/*
9824 			 * We've been killed.  Need to check-in
9825 			 * at the morgue.
9826 			 */
9827 			pq->q_threadp = NULL;
9828 			mutex_exit(&pq->q_mutex);
9829 			PR_PROTO("%s: thread (%d) killed...bye bye\n",
9830 			    proc, pq->q_id);
9831 			for (jp = jl; jp; jp = jl) {
9832 				jl = jp->j_next;
9833 				idn_protojob_free(jp);
9834 			}
9835 			sema_v(pq->q_morgue);
9836 			thread_exit();
9837 			/*NOTREACHED*/
9838 		}
9839 		mutex_exit(&pq->q_mutex);
9840 
9841 		/*
9842 		 * We can process the jobs asynchronously while more are
9843 		 * put on.
9844 		 */
9845 		for (jp = jl; jp; jp = jl) {
9846 			jl = jp->j_next;
9847 			idn_recv_proto(&(jp->j_msg));
9848 			idn_protojob_free(jp);
9849 		}
9850 	}
9851 }
9852 
9853 /*
9854  * Kill off all the protocol servers.
9855  */
9856 static void
idn_protocol_server_killall()9857 idn_protocol_server_killall()
9858 {
9859 	register idn_protoqueue_t	*pq;
9860 	int		i;
9861 	procname_t	proc = "idn_protocol_server_killall";
9862 
9863 	PR_PROTO("%s: killing off %d protocol servers\n",
9864 	    proc, idn.nservers);
9865 
9866 	pq = idn.protocol.p_serverq;
9867 	for (i = 0; i < idn.nservers; pq++, i++) {
9868 		mutex_enter(&pq->q_mutex);
9869 		pq->q_die = 1;
9870 		cv_signal(&pq->q_cv);
9871 		mutex_exit(&pq->q_mutex);
9872 	}
9873 
9874 	while (idn.nservers > 0) {
9875 		sema_p(&idn.protocol.p_morgue);
9876 		idn.nservers--;
9877 	}
9878 }
9879 
9880 idn_protojob_t *
idn_protojob_alloc(int kmflag)9881 idn_protojob_alloc(int kmflag)
9882 {
9883 	idn_protojob_t	*jp;
9884 
9885 	jp = kmem_cache_alloc(idn.protocol.p_jobpool, kmflag);
9886 	if (jp == NULL) {
9887 		mutex_enter(&idn_protojob_cache_lock);
9888 		if ((jp = idn_protojob_cache_list) != NULL)
9889 			idn_protojob_cache_list = jp->j_next;
9890 		mutex_exit(&idn_protojob_cache_lock);
9891 	} else {
9892 		jp->j_cache = 0;
9893 	}
9894 
9895 	return (jp);
9896 }
9897 
9898 static void
idn_protojob_free(idn_protojob_t * jp)9899 idn_protojob_free(idn_protojob_t *jp)
9900 {
9901 	ASSERT(jp);
9902 
9903 	if (jp->j_cache) {
9904 		mutex_enter(&idn_protojob_cache_lock);
9905 		jp->j_next = idn_protojob_cache_list;
9906 		idn_protojob_cache_list = jp;
9907 		mutex_exit(&idn_protojob_cache_lock);
9908 	} else {
9909 		kmem_cache_free(idn.protocol.p_jobpool, (void *)jp);
9910 	}
9911 }
9912 
9913 void
idn_protojob_submit(int cookie,idn_protojob_t * jp)9914 idn_protojob_submit(int cookie, idn_protojob_t *jp)
9915 {
9916 	idn_protoqueue_t	*pq;
9917 	int			serverid;
9918 	procname_t		proc = "idn_protojob_submit";
9919 	STRING(str);
9920 
9921 	if (jp == NULL)
9922 		return;
9923 
9924 	serverid = IDN_PROTOCOL_SERVER_HASH(cookie);
9925 
9926 	pq = &idn.protocol.p_serverq[serverid];
9927 
9928 	INUM2STR(jp->j_msg.m_msgtype, str);
9929 	PR_PROTO("%s: job (d=%d, m=0x%x, %s) submitted to "
9930 	    "protocol server %d\n", proc, jp->j_msg.m_domid,
9931 	    jp->j_msg.m_msgtype, str, serverid);
9932 
9933 	mutex_enter(&pq->q_mutex);
9934 	/*
9935 	 * Can't submit jobs to dying servers.
9936 	 */
9937 	if (!pq->q_die) {
9938 		if (pq->q_joblist_tail) {
9939 			pq->q_joblist_tail->j_next = jp;
9940 			pq->q_joblist_tail = jp;
9941 		} else {
9942 			pq->q_joblist = pq->q_joblist_tail = jp;
9943 		}
9944 		jp->j_next = NULL;
9945 		cv_signal(&pq->q_cv);
9946 	} else {
9947 		PR_PROTO("%s: protocol server dead.  freeing protojob\n",
9948 		    proc);
9949 		idn_protojob_free(jp);
9950 	}
9951 	mutex_exit(&pq->q_mutex);
9952 }
9953 
9954 static void
idn_mboxarea_init(idn_mboxtbl_t * mtp,register int ntbls)9955 idn_mboxarea_init(idn_mboxtbl_t *mtp, register int ntbls)
9956 {
9957 	register int	d;
9958 	caddr_t		state_ptr = NULL, mtbasep = (caddr_t)mtp;
9959 	idn_mboxtbl_t	*amtp;
9960 	procname_t	proc = "idn_mboxarea_init";
9961 
9962 	ASSERT(mtp && (ntbls > 0));
9963 
9964 	PR_PROTO("%s: init mboxtbl (0x%p) ntbls = %d\n",
9965 	    proc, (void *)mtp, ntbls);
9966 
9967 	for (d = 0; d < ntbls; d++) {
9968 		register int	pd, sd;
9969 		register int	ch;
9970 
9971 		mtp->mt_header.mh_svr_active = 0;
9972 		mtp->mt_header.mh_svr_ready = 0;
9973 		/*
9974 		 * Initialize the header of each mbox table
9975 		 * with a cookie for identity.
9976 		 */
9977 		/*
9978 		 * Format: 0xc0c0DSCC
9979 		 *	 D = primary domain
9980 		 *	 S = sub-domain of primary
9981 		 *	CC = channel of sub-domain.
9982 		 */
9983 		pd = (d / MAX_DOMAINS) / IDN_MAX_NETS;
9984 		sd = (d / IDN_MAX_NETS) % MAX_DOMAINS;
9985 		ch = d % IDN_MAX_NETS;
9986 
9987 		/*
9988 		 * We point all sub-domains in the same channel
9989 		 * to the same active sync flag since a single server
9990 		 * services all domains in the same channel.
9991 		 */
9992 		amtp = IDN_MBOXTBL_ABS_PTR(mtbasep, pd, 0, ch);
9993 
9994 		state_ptr = (caddr_t)&amtp->mt_header.mh_svr_active;
9995 		mtp->mt_header.mh_svr_active_ptr = IDN_ADDR2OFFSET(state_ptr);
9996 
9997 		state_ptr = (caddr_t)&amtp->mt_header.mh_svr_ready;
9998 		mtp->mt_header.mh_svr_ready_ptr = IDN_ADDR2OFFSET(state_ptr);
9999 
10000 		mtp->mt_header.mh_cookie = IDN_MAKE_MBOXHDR_COOKIE(pd, sd, ch);
10001 
10002 		mtp->mt_header.mh_cksum = IDN_CKSUM_MBOX(&mtp->mt_header);
10003 
10004 		IDN_MBOXTBL_PTR_INC(mtp);
10005 	}
10006 	/*
10007 	 * Now that the master has initialized the entire mailbox
10008 	 * region the referenced memory may not necessarily be up-to-date
10009 	 * with respect to the actual SMR memory due to caching.
10010 	 * In order to make sure future connecting domains get a
10011 	 * consistent picture of the mailbox region, it's necessary
10012 	 * for the master to flush its caches.
10013 	 */
10014 	PR_PROTO("%s: flushing ecache's of local (master) domain\n", proc);
10015 
10016 	idnxf_flushall_ecache();
10017 }
10018 
10019 idn_mainmbox_t *
idn_mainmbox_init(int domid,int mbx)10020 idn_mainmbox_init(int domid, int mbx)
10021 {
10022 	idn_mainmbox_t	*mmp;
10023 	int		c;
10024 	idn_mainmbox_t	*cmp;
10025 	procname_t	proc = "idn_mainmbox_init";
10026 
10027 	ASSERT(idn_domain[domid].dcpu != IDN_NIL_DCPU);
10028 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10029 
10030 	PR_PROTO("%s: initializing main %s mailbox for domain %d\n",
10031 	    proc, IDNMBOX_IS_RECV(mbx) ? "RECV" : "SEND", domid);
10032 
10033 	cmp = GETSTRUCT(idn_mainmbox_t, IDN_MAX_NETS);
10034 	for (c = 0; c < IDN_MAX_NETS; c++) {
10035 		mmp = &cmp[c];
10036 		mmp->mm_channel = (short)c;
10037 		mutex_init(&mmp->mm_mutex, NULL, MUTEX_DRIVER, NULL);
10038 		mmp->mm_domid = (short)domid;
10039 		mmp->mm_type = mbx;
10040 	}
10041 	mmp = cmp;
10042 	/*
10043 	 * The actual SMR mailbox (mmp->mm_smr_mboxp) gets setup
10044 	 * when the SMR is setup.
10045 	 */
10046 
10047 	return (mmp);
10048 }
10049 
10050 static void
idn_mainmbox_reset(int domid,idn_mainmbox_t * cmp)10051 idn_mainmbox_reset(int domid, idn_mainmbox_t *cmp)
10052 {
10053 	idn_mainmbox_t	*mmp;
10054 	int		c;
10055 	procname_t	proc = "idn_mainmbox_reset";
10056 
10057 	ASSERT(IDN_DLOCK_IS_EXCL(domid));
10058 
10059 	PR_PROTO("%s: reseting main %s mailbox for domain %d\n",
10060 	    proc, IDNMBOX_IS_RECV(cmp->mm_type) ? "RECV" : "SEND", domid);
10061 
10062 	for (c = 0; c < IDN_MAX_NETS; c++) {
10063 		mmp = &cmp[c];
10064 
10065 		mmp->mm_channel = (short)c;
10066 		mmp->mm_domid = (short)domid;
10067 		mmp->mm_count = 0;
10068 		mmp->mm_flags = 0;
10069 		mmp->mm_qiget = mmp->mm_qiput = 0;
10070 		mmp->mm_csp = NULL;
10071 		ASSERT(mmp->mm_type == cmp->mm_type);
10072 	}
10073 }
10074 
10075 void
idn_mainmbox_deinit(int domid,idn_mainmbox_t * mmp)10076 idn_mainmbox_deinit(int domid, idn_mainmbox_t *mmp)
10077 {
10078 	procname_t	proc = "idn_mainmbox_deinit";
10079 
10080 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10081 
10082 	PR_PROTO("%s: deinitializing main %s mailbox for domain %d\n",
10083 	    proc, IDNMBOX_IS_RECV(mmp->mm_type) ? "RECV" : "SEND", domid);
10084 
10085 	ASSERT(idn_domain_is_registered(domid, -1, NULL) == 0);
10086 
10087 	FREESTRUCT(mmp, idn_mainmbox_t, IDN_MAX_NETS);
10088 }
10089 
10090 static void
idn_mainmbox_activate(int domid)10091 idn_mainmbox_activate(int domid)
10092 {
10093 	register int	c;
10094 	idn_domain_t	*dp = &idn_domain[domid];
10095 	procname_t	proc = "idn_mainmbox_activate";
10096 
10097 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10098 
10099 	PR_PROTO("%s:%d: activating main mailbox\n", proc, domid);
10100 
10101 	for (c = 0; c < IDN_MAX_NETS; c++)
10102 		idn_mainmbox_chan_register(domid, &dp->dmbox.m_send[c],
10103 		    &dp->dmbox.m_recv[c], c);
10104 }
10105 
10106 /*
10107  * Called upon disabling the SMR to deactivate all the mailboxes
10108  * so that they no longer reference the SMR that's going away.
10109  *
10110  * stopall - Indicates to stop all channel services, across the board.
10111  */
10112 static void
idn_mainmbox_deactivate(ushort_t domset)10113 idn_mainmbox_deactivate(ushort_t domset)
10114 {
10115 	int		svr_count;
10116 	procname_t	proc = "idn_mainmbox_deactivate";
10117 
10118 
10119 	if (domset == 0)
10120 		return;
10121 
10122 	PR_PROTO("%s: %s deactivating main mailboxes for domset 0x%x\n",
10123 	    proc, (domset == (ushort_t)-1) ? "STOP-ALL" : "NORMAL", domset);
10124 
10125 	svr_count = idn_mainmbox_chan_unregister(domset, -1);
10126 
10127 	PR_PROTO("%s: deactivated %d chansvrs (domset 0x%x)\n",
10128 	    proc, svr_count, domset);
10129 }
10130 
10131 static void
idn_mainmbox_chan_register(int domid,idn_mainmbox_t * send_mmp,idn_mainmbox_t * recv_mmp,int channel)10132 idn_mainmbox_chan_register(int domid, idn_mainmbox_t *send_mmp,
10133     idn_mainmbox_t *recv_mmp, int channel)
10134 {
10135 	ASSERT(IDN_DLOCK_IS_HELD(domid));
10136 
10137 	/*
10138 	 * Obtain receive mailbox lock first.
10139 	 */
10140 	mutex_enter(&recv_mmp->mm_mutex);
10141 	mutex_enter(&send_mmp->mm_mutex);
10142 
10143 	ASSERT(recv_mmp->mm_channel == (short)channel);
10144 	ASSERT(send_mmp->mm_channel == (short)channel);
10145 
10146 	recv_mmp->mm_csp = &idn.chan_servers[channel];
10147 	recv_mmp->mm_count = 0;
10148 	recv_mmp->mm_dropped = 0;
10149 	recv_mmp->mm_flags = 0;
10150 
10151 	send_mmp->mm_csp = &idn.chan_servers[channel];
10152 	send_mmp->mm_count = 0;
10153 	send_mmp->mm_dropped = 0;
10154 	send_mmp->mm_flags = 0;
10155 
10156 	mutex_exit(&send_mmp->mm_mutex);
10157 	mutex_exit(&recv_mmp->mm_mutex);
10158 
10159 	/*
10160 	 * We have to add ourselves to the respective
10161 	 * channel server's service table.
10162 	 * Note that the channel may not necessarily be
10163 	 * active at this time.
10164 	 */
10165 	ASSERT(idn.chan_servers);
10166 	/*
10167 	 * Have to get the channel server under
10168 	 * control so we can add ourselves.
10169 	 * Returns w/c_mutex.
10170 	 */
10171 	IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[channel]);
10172 	/*
10173 	 * Add the following domain (mailbox) for monitoring
10174 	 * by the respective channel server.
10175 	 */
10176 	idn_chan_addmbox(channel, DOMAINSET(domid));
10177 
10178 	IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[channel]);
10179 }
10180 
10181 /*
10182  * Unregister the given domain from the specified channel(s) for monitoring.
10183  */
10184 static int
idn_mainmbox_chan_unregister(ushort_t domset,int channel)10185 idn_mainmbox_chan_unregister(ushort_t domset, int channel)
10186 {
10187 	int		c, dd_count;
10188 	int		min_chan, max_chan;
10189 	procname_t	proc = "idn_mainmbox_chan_unregister";
10190 
10191 	PR_CHAN("%s: deactivating main mailboxes (channel %d) "
10192 	    "for domset 0x%x\n", proc, channel, domset);
10193 
10194 	if (channel == -1) {
10195 		min_chan = 0;
10196 		max_chan = IDN_MAX_NETS - 1;
10197 	} else {
10198 		min_chan = max_chan = channel;
10199 	}
10200 	/*
10201 	 * Point all the data dispatchers to the same morgue
10202 	 * so we can kill them all at once.
10203 	 */
10204 	dd_count = 0;
10205 	for (c = min_chan; c <= max_chan; c++) {
10206 
10207 		/*
10208 		 * Have to get the channel server under
10209 		 * control so we can remove ourselves.
10210 		 * Returns w/c_mutex held.
10211 		 */
10212 		IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]);
10213 		/*
10214 		 * Delete the following domain (mailbox) from
10215 		 * monitoring by the respective channel server.
10216 		 */
10217 		idn_chan_delmbox(c, (ushort_t)domset);
10218 
10219 		IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
10220 		dd_count++;
10221 	}
10222 	PR_CHAN("%s: deactivated %d channel mboxes for domset 0x%x, chan %d\n",
10223 	    proc, dd_count, domset, channel);
10224 	return (dd_count);
10225 }
10226 
10227 /*
10228  * Check if the given domain is registered with the given channel(s).
10229  */
10230 int
idn_domain_is_registered(int domid,int channel,idn_chanset_t * chansetp)10231 idn_domain_is_registered(int domid, int channel, idn_chanset_t *chansetp)
10232 {
10233 	int		regcount;
10234 	int		c, min_chan, max_chan;
10235 	idn_chanset_t	chanset;
10236 	procname_t	proc = "idn_domain_is_registered";
10237 
10238 	CHANSET_ZERO(chanset);
10239 
10240 	if (idn.chan_servers == NULL) {
10241 		PR_CHAN("%s: idn.chan_servers == NULL!!\n", proc);
10242 		return (0);
10243 	}
10244 
10245 	if (channel == -1) {
10246 		min_chan = 0;
10247 		max_chan = IDN_MAX_NETS - 1;
10248 	} else {
10249 		min_chan = max_chan = channel;
10250 	}
10251 
10252 	regcount = 0;
10253 
10254 	for (c = min_chan; c <= max_chan; c++) {
10255 		idn_chansvr_t	*csp;
10256 
10257 		csp = &idn.chan_servers[c];
10258 		IDN_CHAN_LOCK_SEND(csp);
10259 		/*
10260 		 * Don't really need recv side lock since registeration
10261 		 * can't change while we're holding send side.
10262 		 * No need to wait for send side to actually suspend
10263 		 * since all we want to do is prevent the registered
10264 		 * information from changing.
10265 		 */
10266 		if (IDN_CHAN_DOMAIN_IS_REGISTERED(csp, domid)) {
10267 			regcount++;
10268 			CHANSET_ADD(chanset, c);
10269 		}
10270 
10271 		IDN_CHAN_UNLOCK_SEND(csp);
10272 	}
10273 
10274 	PR_CHAN("%s: domid %d mbox reg'd with %d channels [0x%x] (req=%d)\n",
10275 	    proc, domid, regcount, chanset, channel);
10276 
10277 	if (chansetp)
10278 		*chansetp = chanset;
10279 
10280 	return (regcount);
10281 }
10282 
10283 static int
idn_mainmbox_flush(int domid,idn_mainmbox_t * mmp)10284 idn_mainmbox_flush(int domid, idn_mainmbox_t *mmp)
10285 {
10286 	register int		qi;
10287 	register idn_mboxmsg_t	*mqp;
10288 	int		total_count = 0;
10289 	int		c, count;
10290 	int		mbox_type;
10291 	char		*mbox_str;
10292 	int		lost_io, total_lost_io = 0;
10293 	idn_chanset_t	chanset;
10294 	procname_t	proc = "idn_mainmbox_flush";
10295 
10296 
10297 	if (mmp == NULL)
10298 		return (0);
10299 
10300 	CHANSET_ZERO(chanset);
10301 
10302 	mbox_type = mmp->mm_type;
10303 	ASSERT((mbox_type == IDNMMBOX_TYPE_SEND) ||
10304 	    (mbox_type == IDNMMBOX_TYPE_RECV));
10305 
10306 	mbox_str = (mbox_type == IDNMMBOX_TYPE_SEND) ? "SEND" : "RECV";
10307 
10308 	/*
10309 	 * Determine which channels this domain is registered
10310 	 * with.  If he's not registered with any, then we
10311 	 * can't touch the SMR.
10312 	 */
10313 	(void) idn_domain_is_registered(domid, -1, &chanset);
10314 
10315 	for (c = 0; c < IDN_MAX_NETS; c++) {
10316 		ushort_t	mbox_csum;
10317 
10318 		if (mmp[c].mm_smr_mboxp == NULL)
10319 			continue;
10320 		mutex_enter(&mmp[c].mm_mutex);
10321 		ASSERT(mmp[c].mm_type == mbox_type);
10322 		if (CHAN_IN_SET(chanset, c) == 0) {
10323 			/*
10324 			 * Domain is no longer registered.
10325 			 * DON'T TOUCH THE SMR - IT'S POISON!
10326 			 */
10327 			if (mmp[c].mm_smr_mboxp) {
10328 				PR_CHAN("%s:%d:%s: domain unregistered "
10329 				    "w/chan %d - DUMPING SMR reference\n",
10330 				    proc, domid, mbox_str, c);
10331 				lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
10332 				    mmp[c].mm_qiget);
10333 #ifdef DEBUG
10334 				if (mbox_type == IDNMMBOX_TYPE_RECV) {
10335 					PR_CHAN("%s:%d:%s: blowing away %d "
10336 					    "incoming pkts\n",
10337 					    proc, domid, mbox_str, lost_io);
10338 				} else {
10339 					PR_CHAN("%s:%d:%s: blowing away %d/%d "
10340 					    "outstanding pkts\n",
10341 					    proc, domid, mbox_str, lost_io,
10342 					    idn_domain[domid].dio);
10343 				}
10344 #endif /* DEBUG */
10345 			}
10346 			mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10347 			mmp[c].mm_smr_mboxp = NULL;
10348 			total_lost_io += lost_io;
10349 		}
10350 		if (mmp[c].mm_smr_mboxp) {
10351 			mbox_csum =
10352 			    IDN_CKSUM_MBOX(&mmp[c].mm_smr_mboxp->mt_header);
10353 			if (!VALID_NWRADDR(mmp[c].mm_smr_mboxp, 4) ||
10354 			    !VALID_MBOXHDR(&mmp[c].mm_smr_mboxp->mt_header,
10355 			    c, mbox_csum)) {
10356 				lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput,
10357 				    mmp[c].mm_qiget);
10358 #ifdef DEBUG
10359 				if (mbox_type == IDNMMBOX_TYPE_RECV) {
10360 					PR_CHAN("%s:%d:%s: bad mbox.  blowing "
10361 					    "away %d incoming pkts\n",
10362 					    proc, domid, mbox_str, lost_io);
10363 				} else {
10364 					PR_CHAN("%s:%d:%s: bad mbox.  blowing "
10365 					    "away %d/%d outstanding pkts\n",
10366 					    proc, domid, mbox_str, lost_io,
10367 					    idn_domain[domid].dio);
10368 				}
10369 #endif /* DEBUG */
10370 				mmp[c].mm_smr_mboxp = NULL;
10371 				mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10372 				total_lost_io += lost_io;
10373 			}
10374 		}
10375 		if (mmp[c].mm_smr_mboxp == NULL) {
10376 			mutex_exit(&mmp[c].mm_mutex);
10377 			continue;
10378 		}
10379 		mqp = &mmp[c].mm_smr_mboxp->mt_queue[0];
10380 		qi = 0;
10381 		count = 0;
10382 		/*
10383 		 * It's quite possible the remote domain may be accessing
10384 		 * these mailbox entries at the exact same time we're
10385 		 * clearing the owner bit.  That's okay.  All we're trying
10386 		 * to do at this point is to minimize the number of packets
10387 		 * the remote domain might try to process unnecessarily.
10388 		 */
10389 		do {
10390 			if (mqp[qi].ms_owner)
10391 				count++;
10392 			mqp[qi].ms_owner = 0;
10393 			IDN_MMBOXINDEX_INC(qi);
10394 		} while (qi);
10395 
10396 		lost_io = IDN_MMBOXINDEX_DIFF(mmp[c].mm_qiput, mmp[c].mm_qiget);
10397 		total_lost_io += lost_io;
10398 
10399 		mmp[c].mm_qiput = mmp[c].mm_qiget = 0;
10400 		mmp[c].mm_smr_mboxp = NULL;
10401 		mutex_exit(&mmp[c].mm_mutex);
10402 
10403 		total_count += count;
10404 
10405 		PR_CHAN("%s:%d:%s: flushed out %d mbox entries for chan %d\n",
10406 		    proc, domid, mbox_str, count, c);
10407 	}
10408 
10409 	if (total_lost_io && (mbox_type == IDNMMBOX_TYPE_SEND)) {
10410 		int	lost_bufs;
10411 		/*
10412 		 * If we lost all our outstanding I/O.  We could
10413 		 * possible could have slabs now with mistakenly
10414 		 * outstanding I/O buffers.  Need to clean them up.
10415 		 * Clean up of leftovers our self.
10416 		 */
10417 		lost_bufs = smr_buf_free_all(domid);
10418 
10419 		PR_CHAN("%s:%d:%s: flushed %d/%d buffers from slabs\n",
10420 		    proc, domid, mbox_str, lost_bufs, total_lost_io);
10421 	}
10422 
10423 	PR_CHAN("%s:%d:%s: flushed total of %d mailbox entries (lost %d)\n",
10424 	    proc, domid, mbox_str, total_count, total_lost_io);
10425 
10426 	return (total_count);
10427 }
10428 
10429 void
idn_chanserver_bind(int net,int cpuid)10430 idn_chanserver_bind(int net, int cpuid)
10431 {
10432 	int		ocpuid;
10433 	cpu_t		*cp;
10434 	idn_chansvr_t	*csp;
10435 	kthread_id_t	tp;
10436 	procname_t	proc = "idn_chanserver_bind";
10437 
10438 	csp = &idn.chan_servers[net];
10439 	IDN_CHAN_LOCK_GLOBAL(csp);
10440 
10441 	mutex_enter(&cpu_lock);		/* protect checking cpu_ready_set */
10442 	ocpuid = csp->ch_bound_cpuid;
10443 	cp = cpu_get(cpuid);
10444 	if ((cpuid != -1) && ((cp == NULL) || !cpu_is_online(cp))) {
10445 		mutex_exit(&cpu_lock);
10446 		cmn_err(CE_WARN,
10447 		    "IDN: 239: invalid CPU ID (%d) specified for "
10448 		    "IDN net %d",
10449 		    cpuid, net);
10450 		IDN_CHAN_UNLOCK_GLOBAL(csp);
10451 		return;
10452 	}
10453 	if ((tp = csp->ch_recv_threadp) == NULL) {
10454 		/*
10455 		 * Thread is not yet active.  Set ch_bound_cpuid
10456 		 * so when thread activates it will automatically
10457 		 * bind itself.
10458 		 */
10459 		csp->ch_bound_cpuid = -1;
10460 		csp->ch_bound_cpuid_pending = cpuid;
10461 	} else {
10462 		if (ocpuid != -1) {
10463 			thread_affinity_clear(tp);
10464 			csp->ch_bound_cpuid = -1;
10465 		}
10466 		if (cpuid >= 0) {
10467 			thread_affinity_set(tp, cpuid);
10468 			csp->ch_bound_cpuid = cpuid;
10469 		}
10470 		csp->ch_bound_cpuid_pending = -1;
10471 	}
10472 	mutex_exit(&cpu_lock);
10473 
10474 	PR_CHAN("%s: bound net/channel (%d) from cpuid %d to%scpuid %d\n",
10475 	    proc, net, ocpuid, tp ? " " : " (pending) ", cpuid);
10476 
10477 	IDN_CHAN_UNLOCK_GLOBAL(csp);
10478 }
10479 
10480 #ifdef DEBUG
10481 static idn_mboxhdr_t	*prev_mhp[IDN_MAXMAX_NETS];
10482 #endif /* DEBUG */
10483 /*
10484  * Get access to the respective channel server's synchronization
10485  * header which resides in SMR space.
10486  */
10487 static idn_mboxhdr_t *
idn_chan_server_syncheader(int channel)10488 idn_chan_server_syncheader(int channel)
10489 {
10490 	idn_domain_t	*ldp = &idn_domain[idn.localid];
10491 	idn_mboxtbl_t	*mtp;
10492 	idn_mboxhdr_t	*mhp;
10493 	ushort_t	mbox_csum;
10494 	procname_t	proc = "idn_chan_server_syncheader";
10495 
10496 	ASSERT(IDN_CHAN_RECV_IS_LOCKED(&idn.chan_servers[channel]));
10497 
10498 	IDN_DLOCK_SHARED(idn.localid);
10499 
10500 	if (ldp->dmbox.m_tbl == NULL) {
10501 		PR_CHAN("%s: local dmbox.m_tbl == NULL\n", proc);
10502 		IDN_DUNLOCK(idn.localid);
10503 		return (NULL);
10504 	}
10505 
10506 	mtp = IDN_MBOXTBL_PTR_CHAN(ldp->dmbox.m_tbl, channel);
10507 	mhp = &mtp->mt_header;
10508 	mbox_csum = IDN_CKSUM_MBOX(&mtp->mt_header);
10509 
10510 #ifdef DEBUG
10511 	if (mhp != prev_mhp[channel]) {
10512 		prev_mhp[channel] = mhp;
10513 		PR_CHAN("%s: chan_server (%d) cookie = 0x%x (exp 0x%x)\n",
10514 		    proc, channel, IDN_GET_MBOXHDR_COOKIE(mhp),
10515 		    IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel));
10516 		PR_CHAN("%s: chan_server (%d) actv_ptr = 0x%x (exp 0x%x)\n",
10517 		    proc, channel, mhp->mh_svr_active_ptr,
10518 		    IDN_ADDR2OFFSET(&mhp->mh_svr_active));
10519 		PR_CHAN("%s: chan_server (%d) ready_ptr = 0x%x (exp 0x%x)\n",
10520 		    proc, channel, mhp->mh_svr_ready_ptr,
10521 		    IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
10522 		PR_CHAN("%s: chan_server (%d) mbox_cksum = 0x%x (exp 0x%x)\n",
10523 		    proc, channel, (int)mhp->mh_cksum, (int)mbox_csum);
10524 	}
10525 #endif /* DEBUG */
10526 
10527 	if ((IDN_ADDR2OFFSET(&mhp->mh_svr_active) !=
10528 	    mhp->mh_svr_active_ptr) ||
10529 	    (IDN_ADDR2OFFSET(&mhp->mh_svr_ready) != mhp->mh_svr_ready_ptr) ||
10530 	    !VALID_MBOXHDR(mhp, channel, mbox_csum)) {
10531 		idn_chansvr_t	*csp;
10532 
10533 		csp = &idn.chan_servers[channel];
10534 		if (IDN_CHANNEL_IS_RECV_CORRUPTED(csp) == 0) {
10535 			IDN_CHANSVC_MARK_RECV_CORRUPTED(csp);
10536 
10537 			cmn_err(CE_WARN,
10538 			    "IDN: 240: (channel %d) SMR CORRUPTED "
10539 			    "- RELINK", channel);
10540 			cmn_err(CE_CONT,
10541 			    "IDN: 240: (channel %d) cookie "
10542 			    "(expected 0x%x, actual 0x%x)\n",
10543 			    channel,
10544 			    IDN_MAKE_MBOXHDR_COOKIE(0, 0, channel),
10545 			    mhp->mh_cookie);
10546 			cmn_err(CE_CONT,
10547 			    "IDN: 240: (channel %d) actv_flg "
10548 			    "(expected 0x%x, actual 0x%x)\n",
10549 			    channel, mhp->mh_svr_active_ptr,
10550 			    IDN_ADDR2OFFSET(&mhp->mh_svr_active));
10551 			cmn_err(CE_CONT,
10552 			    "IDN: 240: (channel %d) ready_flg "
10553 			    "(expected 0x%x, actual 0x%x)\n",
10554 			    channel, mhp->mh_svr_ready_ptr,
10555 			    IDN_ADDR2OFFSET(&mhp->mh_svr_ready));
10556 		}
10557 
10558 		mhp = NULL;
10559 	}
10560 	IDN_DUNLOCK(idn.localid);
10561 
10562 	PR_CHAN("%s: channel(%d) mainhp = 0x%p\n", proc, channel, (void *)mhp);
10563 
10564 	return (mhp);
10565 }
10566 
10567 #define	CHANSVR_SYNC_CACHE(csp, mmp, chan) \
10568 { \
10569 	ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp)); \
10570 	if ((csp)->ch_recv_changed) { \
10571 		register int _d; \
10572 		(csp)->ch_recv_scanset = (csp)->ch_recv_scanset_pending; \
10573 		(csp)->ch_recv_domset = (csp)->ch_recv_domset_pending; \
10574 		for (_d = 0; _d < MAX_DOMAINS; _d++) { \
10575 			if (DOMAIN_IN_SET((csp)->ch_recv_domset, _d)) { \
10576 				(mmp)[_d] = \
10577 				    &idn_domain[_d].dmbox.m_recv[chan]; \
10578 			} else { \
10579 				(mmp)[_d] = NULL; \
10580 			} \
10581 		} \
10582 		(csp)->ch_recv_changed = 0; \
10583 	} \
10584 }
10585 #define	CHANSVR_NEXT_DOMID(csp, i, d) \
10586 { \
10587 	(i) = ((i) + 1) & (MAX_DOMAINS - 1); \
10588 	(d) = (int)(((csp)->ch_recv_scanset >> ((i) << 2)) & 0xf); \
10589 }
10590 #define	CHANSVR_RESET_INDEX(i)	((i) = -1)
10591 
10592 #ifdef DEBUG
10593 static idn_mainmbox_t	*Mmp[IDN_MAXMAX_NETS][MAX_DOMAINS];
10594 #endif /* DEBUG */
10595 
10596 static void
idn_chan_server(idn_chansvr_t ** cspp)10597 idn_chan_server(idn_chansvr_t **cspp)
10598 {
10599 	idn_mboxhdr_t	*mainhp;
10600 	register idn_chansvr_t		*csp;
10601 	register idn_mboxmsg_t		*mqp;
10602 #ifdef DEBUG
10603 	idn_mainmbox_t			**mmp;
10604 #else
10605 	idn_mainmbox_t			*mmp[MAX_DOMAINS];
10606 #endif /* DEBUG */
10607 	register int	qi;
10608 	struct idn	*sip;
10609 	int		channel;
10610 	int		cpuid;
10611 	int		empty;
10612 	int		tot_pktcount, tot_dropcount;
10613 	register int	index;
10614 	register int	domid;
10615 	register int	idleloops;
10616 	procname_t	proc = "idn_chan_server";
10617 
10618 
10619 #ifdef DEBUG
10620 	mmp = &Mmp[(*cspp)->ch_id][0];
10621 	bzero(mmp, MAX_DOMAINS * sizeof (idn_mainmbox_t *));
10622 #else /* DEBUG */
10623 	bzero(mmp, sizeof (mmp));
10624 #endif /* DEBUG */
10625 
10626 	tot_pktcount = tot_dropcount = 0;
10627 
10628 	ASSERT(cspp && *cspp);
10629 
10630 	csp = *cspp;
10631 	channel = csp->ch_id;
10632 	sip = IDN_INST2SIP(channel);
10633 	ASSERT(sip);
10634 
10635 	PR_CHAN("%s: CHANNEL SERVER (channel %d) GOING ACTIVE...\n",
10636 	    proc, channel);
10637 
10638 	IDN_CHAN_LOCK_RECV(csp);
10639 	IDN_CHAN_RECV_INPROGRESS(csp);
10640 	ASSERT(csp->ch_recv_threadp == curthread);
10641 	mutex_enter(&cpu_lock);
10642 	if ((cpuid = csp->ch_bound_cpuid_pending) != -1) {
10643 		cpu_t	*cp = cpu_get(cpuid);
10644 		/*
10645 		 * We've been requested to bind to
10646 		 * a particular cpu.
10647 		 */
10648 		if ((cp == NULL) || !cpu_is_online(cp)) {
10649 			/*
10650 			 * Cpu seems to have gone away or gone offline
10651 			 * since originally requested.
10652 			 */
10653 			mutex_exit(&cpu_lock);
10654 			cmn_err(CE_WARN,
10655 			    "IDN: 239: invalid CPU ID (%d) specified for "
10656 			    "IDN net %d",
10657 			    cpuid, channel);
10658 		} else {
10659 			csp->ch_bound_cpuid = cpuid;
10660 			affinity_set(csp->ch_bound_cpuid);
10661 			mutex_exit(&cpu_lock);
10662 		}
10663 		csp->ch_bound_cpuid_pending = -1;
10664 	} else {
10665 		mutex_exit(&cpu_lock);
10666 	}
10667 	if (csp->ch_bound_cpuid != -1) {
10668 		PR_CHAN("%s: thread bound to cpuid %d\n",
10669 		    proc, csp->ch_bound_cpuid);
10670 	}
10671 	/*
10672 	 * Only the first (main) mbox header is used for
10673 	 * synchronization with data delivery since there is
10674 	 * only data server for all mailboxes for this
10675 	 * given channel.
10676 	 */
10677 	CHANSVR_SYNC_CACHE(csp, mmp, channel);
10678 
10679 	mainhp = ((csp->ch_recv_domcount > 0) &&
10680 	    IDN_CHANNEL_IS_RECV_ACTIVE(csp))
10681 	    ? idn_chan_server_syncheader(channel) : NULL;
10682 
10683 	if (mainhp && IDN_CHANNEL_IS_RECV_ACTIVE(csp))
10684 		mainhp->mh_svr_active = 1;
10685 
10686 	ASSERT(csp->ch_recv_domcount ?
10687 	    (csp->ch_recv_scanset && csp->ch_recv_domset) : 1);
10688 
10689 	IDN_CHAN_UNLOCK_RECV(csp);
10690 
10691 	empty = 0;
10692 	idleloops = 0;
10693 	CHANSVR_RESET_INDEX(index);
10694 
10695 	/*
10696 	 * ---------------------------------------------
10697 	 */
10698 	/*CONSTCOND*/
10699 	while (1) {
10700 		register int	pktcount;
10701 		register int	dropcount;
10702 		ushort_t		mbox_csum;
10703 		idn_mboxtbl_t	*smr_mboxp;	/* points to SMR space */
10704 		register smr_offset_t	bufoffset;
10705 #ifdef DEBUG
10706 		register smr_pkthdr_t	*hdrp;
10707 		idn_netaddr_t		netaddr;
10708 #endif /* DEBUG */
10709 
10710 		/*
10711 		 * Speed through and find the next available domid.
10712 		 */
10713 		CHANSVR_NEXT_DOMID(csp, index, domid);
10714 
10715 		if (!index) {
10716 			/*
10717 			 * We only check state changes when
10718 			 * we wrap around.  Done for performance.
10719 			 */
10720 			if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) ||
10721 			    csp->ch_recv.c_checkin ||
10722 			    (idn.state != IDNGS_ONLINE)) {
10723 
10724 				PR_DATA("%s: (channel %d) %s\n",
10725 				    proc, channel,
10726 				    IDN_CHANNEL_IS_DETACHED(csp)
10727 				    ? "DEAD" :
10728 				    IDN_CHANNEL_IS_PENDING(csp)
10729 				    ? "IDLED" :
10730 				    IDN_CHANNEL_IS_ACTIVE(csp)
10731 				    ? "ACTIVE" : "DISABLED");
10732 				goto cc_sleep;
10733 			}
10734 		}
10735 		if (csp->ch_recv.c_checkin)
10736 			goto cc_sleep;
10737 
10738 		if (empty == csp->ch_recv_domcount) {
10739 			empty = 0;
10740 			goto cc_slowdown;
10741 		}
10742 
10743 		ASSERT(mmp[domid] != NULL);
10744 
10745 		mutex_enter(&mmp[domid]->mm_mutex);
10746 		if ((smr_mboxp = mmp[domid]->mm_smr_mboxp) == NULL) {
10747 			/*
10748 			 * Somebody is trying to shut things down.
10749 			 */
10750 			empty++;
10751 			mutex_exit(&mmp[domid]->mm_mutex);
10752 			continue;
10753 		}
10754 		ASSERT(mmp[domid]->mm_channel == (short)channel);
10755 		/*
10756 		 * We don't care if the mm_smr_mboxp is nullified
10757 		 * after this point.  The thread attempting to shut
10758 		 * us down has to formally pause this channel before
10759 		 * anything is official anyway.  So, we can continue
10760 		 * with our local SMR reference until the thread
10761 		 * shutting us down really stops us.
10762 		 *
10763 		 * Need to get the qiget index _before_ we drop the
10764 		 * lock since it might get flushed (idn_mainmbox_flush)
10765 		 * once we drop the mm_mutex.
10766 		 *
10767 		 * We prefer not to hold the mm_mutex across the
10768 		 * idn_recv_mboxdata() call since that may be time-
10769 		 * consuming.
10770 		 */
10771 		qi  = mmp[domid]->mm_qiget;
10772 
10773 		/*
10774 		 * Check the mailbox header if checksum is turned on.
10775 		 */
10776 		mbox_csum = IDN_CKSUM_MBOX(&smr_mboxp->mt_header);
10777 		if (!VALID_MBOXHDR(&smr_mboxp->mt_header, channel, mbox_csum)) {
10778 			IDN_KSTAT_INC(sip, si_mboxcrc);
10779 			IDN_KSTAT_INC(sip, si_ierrors);
10780 			if (!(mmp[domid]->mm_flags & IDNMMBOX_FLAG_CORRUPTED)) {
10781 				cmn_err(CE_WARN,
10782 				    "IDN: 241: [recv] (domain %d, "
10783 				    "channel %d) SMR CORRUPTED - RELINK",
10784 				    domid, channel);
10785 				mmp[domid]->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
10786 			}
10787 			empty = 0;
10788 			mutex_exit(&mmp[domid]->mm_mutex);
10789 			goto cc_sleep;
10790 		}
10791 		mutex_exit(&mmp[domid]->mm_mutex);
10792 		mqp = &smr_mboxp->mt_queue[0];
10793 
10794 		pktcount = dropcount = 0;
10795 
10796 		if (mqp[qi].ms_owner == 0)
10797 			goto cc_next;
10798 
10799 		bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
10800 
10801 		if (!VALID_NWROFFSET(bufoffset, IDN_SMR_BUFSIZE)) {
10802 			/* ASSERT(0); */
10803 			mqp[qi].ms_flag |= IDN_MBOXMSG_FLAG_ERR_BADOFFSET;
10804 			mqp[qi].ms_owner = 0;
10805 			IDN_MMBOXINDEX_INC(qi);
10806 			dropcount++;
10807 
10808 			IDN_KSTAT_INC(sip, si_smraddr);
10809 			IDN_KSTAT_INC(sip, si_ierrors);
10810 
10811 		} else {
10812 			PR_DATA("%s: (channel %d) pkt (off 0x%x, "
10813 			    "qiget %d) from domain %d\n",
10814 			    proc, channel, bufoffset, qi, domid);
10815 #ifdef DEBUG
10816 
10817 			hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(bufoffset));
10818 			netaddr.netaddr = hdrp->b_netaddr;
10819 			ASSERT(netaddr.net.chan == (ushort_t)channel);
10820 #endif /* DEBUG */
10821 
10822 			if (idn_recv_mboxdata(channel,
10823 			    IDN_OFFSET2ADDR(bufoffset)) < 0) {
10824 				mutex_enter(&mmp[domid]->mm_mutex);
10825 				if (!(mmp[domid]->mm_flags &
10826 				    IDNMMBOX_FLAG_CORRUPTED)) {
10827 					cmn_err(CE_WARN,
10828 					    "IDN: 241: [recv] (domain "
10829 					    "%d, channel %d) SMR "
10830 					    "CORRUPTED - RELINK",
10831 					    domid, channel);
10832 					mmp[domid]->mm_flags |=
10833 					    IDNMMBOX_FLAG_CORRUPTED;
10834 				}
10835 				mutex_exit(&mmp[domid]->mm_mutex);
10836 			}
10837 
10838 			mqp[qi].ms_owner = 0;
10839 			IDN_MMBOXINDEX_INC(qi);
10840 			pktcount++;
10841 		}
10842 
10843 cc_next:
10844 
10845 		mutex_enter(&mmp[domid]->mm_mutex);
10846 		if (mmp[domid]->mm_smr_mboxp) {
10847 			if (dropcount)
10848 				mmp[domid]->mm_dropped += dropcount;
10849 			mmp[domid]->mm_qiget = qi;
10850 			mmp[domid]->mm_count += pktcount;
10851 		}
10852 		mutex_exit(&mmp[domid]->mm_mutex);
10853 
10854 		if (pktcount == 0) {
10855 			empty++;
10856 		} else {
10857 			csp->ch_recv_waittime = IDN_NETSVR_WAIT_MIN;
10858 			empty = 0;
10859 			idleloops = 0;
10860 
10861 			PR_DATA("%s: (channel %d) dom=%d, pktcnt=%d\n",
10862 			    proc, channel, domid, pktcount);
10863 		}
10864 
10865 		continue;
10866 
10867 cc_slowdown:
10868 
10869 #ifdef DEBUG
10870 		if (idleloops == 0) {
10871 			PR_DATA("%s: (channel %d) going SOFT IDLE...\n",
10872 			    proc, channel);
10873 		}
10874 #endif /* DEBUG */
10875 		if (idleloops++ < IDN_NETSVR_SPIN_COUNT) {
10876 			/*
10877 			 * At this level we only busy-wait.
10878 			 * Get back into action.
10879 			 */
10880 			continue;
10881 		}
10882 		idleloops = 0;
10883 
10884 cc_sleep:
10885 
10886 		if (mainhp)
10887 			mainhp->mh_svr_active = 0;
10888 
10889 		IDN_CHAN_LOCK_RECV(csp);
10890 
10891 cc_die:
10892 
10893 		ASSERT(IDN_CHAN_RECV_IS_LOCKED(csp));
10894 
10895 		if (!IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
10896 		    IDN_CHANNEL_IS_DETACHED(csp)) {
10897 			/*
10898 			 * Time to die...
10899 			 */
10900 			PR_CHAN("%s: (channel %d) serviced %d "
10901 			    "packets, drop = %d\n", proc, channel,
10902 			    tot_pktcount, tot_dropcount);
10903 			PR_CHAN("%s: (channel %d) TERMINATING\n",
10904 			    proc, channel);
10905 			PR_CHAN("%s: (channel %d) ch_morguep = %p\n",
10906 			    proc, channel, (void *)csp->ch_recv_morguep);
10907 
10908 			csp->ch_recv_threadp = NULL;
10909 #ifdef DEBUG
10910 			for (index = 0; index < csp->ch_recv_domcount;
10911 			    index++) {
10912 				if ((int)((csp->ch_recv_scanset >>
10913 				    (index*4)) & 0xf) == domid) {
10914 					PR_DATA("%s: WARNING (channel %d) "
10915 					    "DROPPING domid %d...\n",
10916 					    proc, channel, domid);
10917 				}
10918 			}
10919 #endif /* DEBUG */
10920 			IDN_CHAN_RECV_DONE(csp);
10921 
10922 			sema_v(csp->ch_recv_morguep);
10923 
10924 			IDN_CHAN_UNLOCK_RECV(csp);
10925 
10926 			thread_exit();
10927 			/* not reached */
10928 		}
10929 
10930 		do {
10931 			if (IDN_CHANNEL_IS_DETACHED(csp)) {
10932 				PR_CHAN("%s: (channel %d) going to DIE...\n",
10933 				    proc, channel);
10934 				goto cc_die;
10935 			}
10936 #ifdef DEBUG
10937 			if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) &&
10938 			    (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)) {
10939 				PR_CHAN("%s: (channel %d) going SOFT IDLE "
10940 				    "(waittime = %d ticks)...\n",
10941 				    proc, channel,
10942 				    csp->ch_recv_waittime);
10943 			} else {
10944 				PR_CHAN("%s: (channel %d) going "
10945 				    "HARD IDLE...\n", proc, channel);
10946 			}
10947 #endif /* DEBUG */
10948 			IDN_CHAN_RECV_DONE(csp);
10949 
10950 			/*
10951 			 * If we're being asked to check-in then
10952 			 * go into a hard sleep.  Want to give the
10953 			 * thread requesting us to checkin a chance.
10954 			 */
10955 			while (csp->ch_recv.c_checkin)
10956 				cv_wait(&csp->ch_recv_cv,
10957 				    &csp->ch_recv.c_mutex);
10958 
10959 			if (csp->ch_recv_waittime > IDN_NETSVR_WAIT_MAX)
10960 				cv_wait(&csp->ch_recv_cv,
10961 				    &csp->ch_recv.c_mutex);
10962 			else
10963 				(void) cv_reltimedwait(&csp->ch_recv_cv,
10964 				    &csp->ch_recv.c_mutex,
10965 				    csp->ch_recv_waittime, TR_CLOCK_TICK);
10966 
10967 			IDN_CHAN_RECV_INPROGRESS(csp);
10968 
10969 			IDN_KSTAT_INC(sip, si_sigsvr);
10970 
10971 			if (csp->ch_recv_waittime <= IDN_NETSVR_WAIT_MAX)
10972 				csp->ch_recv_waittime <<=
10973 				    IDN_NETSVR_WAIT_SHIFT;
10974 
10975 		} while (!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
10976 
10977 		/*
10978 		 * Before we see the world (and touch SMR space),
10979 		 * see if we've been told to die.
10980 		 */
10981 		mainhp = NULL;
10982 		/*
10983 		 * The world may have changed since we were
10984 		 * asleep.  Need to resync cache and check for a
10985 		 * new syncheader.
10986 		 *
10987 		 * Reset chansvr cache against any changes in
10988 		 * mbox fields we need (mm_qiget).
10989 		 */
10990 		CHANSVR_SYNC_CACHE(csp, mmp, channel);
10991 		if (csp->ch_recv_domcount <= 0) {
10992 			/*
10993 			 * Everybody disappeared on us.
10994 			 * Go back to sleep.
10995 			 */
10996 			goto cc_die;
10997 		}
10998 		ASSERT(csp->ch_recv_scanset && csp->ch_recv_domset);
10999 
11000 		mainhp = idn_chan_server_syncheader(channel);
11001 		if (mainhp == NULL) {
11002 			/*
11003 			 * Bummer...we're idling...
11004 			 */
11005 			goto cc_die;
11006 		}
11007 
11008 		mainhp->mh_svr_active = 1;
11009 
11010 		IDN_CHAN_UNLOCK_RECV(csp);
11011 		/*
11012 		 * Reset the domid index after sleeping.
11013 		 */
11014 		CHANSVR_RESET_INDEX(index);
11015 
11016 		empty = 0;
11017 		idleloops = 0;
11018 	}
11019 }
11020 
11021 #if 0
11022 /*
11023  * We maintain a separate function for flushing the STREAMs
11024  * queue of a channel because it must be done outside the
11025  * context of the idn_chan_action routine.  The streams flush
11026  * cannot occur inline with the idn_chan_action because
11027  * the act of flushing may cause IDN send functions to be called
11028  * directly and thus locks to be obtained which could result
11029  * in deadlocks.
11030  */
11031 static void
11032 idn_chan_flush(idn_chansvr_t *csp)
11033 {
11034 	queue_t		*rq;
11035 	struct idn	*sip;
11036 	int		flush_type = 0;
11037 	idn_chaninfo_t	*csend, *crecv;
11038 	procname_t	proc = "idn_chan_flush";
11039 
11040 	csend = &csp->ch_send;
11041 	crecv = &csp->ch_recv;
11042 
11043 	mutex_enter(&crecv->c_mutex);
11044 	mutex_enter(&csend->c_mutex);
11045 
11046 	if (crecv->c_state & IDN_CHANSVC_STATE_FLUSH)
11047 		flush_type |= FLUSHR;
11048 
11049 	if (csend->c_state & IDN_CHANSVC_STATE_FLUSH)
11050 		flush_type |= FLUSHW;
11051 
11052 	if (flush_type) {
11053 		rq = NULL;
11054 		rw_enter(&idn.struprwlock, RW_READER);
11055 		if ((sip = IDN_INST2SIP(csp->ch_id)) != NULL)
11056 			rq = sip->si_ipq;
11057 		rw_exit(&idn.struprwlock);
11058 		if (rq) {
11059 			/*
11060 			 * Flush the STREAM if possible
11061 			 * to get the channel server coherent
11062 			 * enough to respond to us.
11063 			 */
11064 			PR_CHAN("%s: sending FLUSH (%x) to channel %d\n",
11065 			    proc, flush_type, csp->ch_id);
11066 
11067 			(void) putnextctl1(rq, M_FLUSH, flush_type);
11068 		}
11069 		crecv->c_state &= ~IDN_CHANSVC_STATE_FLUSH;
11070 		csend->c_state &= ~IDN_CHANSVC_STATE_FLUSH;
11071 
11072 		if (crecv->c_waiters)
11073 			cv_broadcast(&crecv->c_cv);
11074 	}
11075 
11076 	mutex_exit(&csend->c_mutex);
11077 	mutex_exit(&crecv->c_mutex);
11078 }
11079 #endif /* 0 */
11080 
11081 /*
11082  * Locks are with respect to SEND/RECV locks (c_mutex).
11083  *
11084  * STOP/SUSPEND/DETACH
11085  *	- Entered with locks dropped, leave with locks held.
11086  *	  DETACH - lock dropped manually.
11087  * RESTART/RESUME
11088  *	- Entered with locks held, leave with locks dropped.
11089  * ATTACH
11090  *	- both enter and leave with locks dropped.
11091  */
11092 static void
idn_chan_action(int channel,idn_chanaction_t chanaction,int wait)11093 idn_chan_action(int channel, idn_chanaction_t chanaction, int wait)
11094 {
11095 	uchar_t		clr_state, set_state;
11096 	uint_t		is_running;
11097 	domainset_t	closed_slabwaiters = 0;
11098 	struct idn	*sip;
11099 	idn_chansvr_t	*csp;
11100 	idn_chaninfo_t	*csend, *crecv;
11101 	procname_t	proc = "idn_chan_action";
11102 
11103 	ASSERT((channel >= 0) && (channel < IDN_MAX_NETS));
11104 	ASSERT(idn.chan_servers);
11105 
11106 	csp = &idn.chan_servers[channel];
11107 
11108 	PR_CHAN("%s: requesting %s for channel %d\n",
11109 	    proc, chanaction_str[(int)chanaction], channel);
11110 
11111 	csend = &csp->ch_send;
11112 	crecv = &csp->ch_recv;
11113 
11114 	ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
11115 
11116 	clr_state = set_state = 0;
11117 
11118 	switch (chanaction) {
11119 	case IDNCHAN_ACTION_DETACH:
11120 		clr_state = IDN_CHANSVC_STATE_MASK;
11121 		/*FALLTHROUGH*/
11122 
11123 	case IDNCHAN_ACTION_STOP:
11124 		clr_state |= IDN_CHANSVC_STATE_ENABLED;
11125 		/*FALLTHROUGH*/
11126 
11127 	case IDNCHAN_ACTION_SUSPEND:
11128 		clr_state |= IDN_CHANSVC_STATE_ACTIVE;
11129 
11130 		/*
11131 		 * Must maintain this locking order.
11132 		 * Set asynchronous check-in flags.
11133 		 */
11134 		crecv->c_checkin = 1;
11135 		csend->c_checkin = 1;
11136 
11137 		is_running = 0;
11138 		if ((csend->c_inprogress || crecv->c_inprogress) &&
11139 		    wait && (csp->ch_recv_threadp != curthread)) {
11140 
11141 			rw_enter(&idn.struprwlock, RW_READER);
11142 			if ((sip = IDN_INST2SIP(channel)) != NULL) {
11143 				/*
11144 				 * Temporarily turn off the STREAM
11145 				 * to give a chance to breath.
11146 				 */
11147 				is_running = sip->si_flags & IDNRUNNING;
11148 				if (is_running)
11149 					sip->si_flags &= ~IDNRUNNING;
11150 			}
11151 			rw_exit(&idn.struprwlock);
11152 		}
11153 
11154 		mutex_enter(&crecv->c_mutex);
11155 		crecv->c_state &= ~clr_state;
11156 
11157 		mutex_enter(&csend->c_mutex);
11158 		csend->c_state &= ~clr_state;
11159 
11160 		/*
11161 		 * It's possible the channel server could come
11162 		 * through this flow itself due to putting data upstream
11163 		 * that ultimately turned around and came back down for
11164 		 * sending.  If this is the case we certainly don't
11165 		 * want to cv_wait, otherwise we'll obviously deadlock
11166 		 * waiting for ourself.  So, only block if somebody
11167 		 * other than the channel server we're attempting to
11168 		 * suspend/stop.
11169 		 */
11170 		if (wait && (csp->ch_recv_threadp != curthread)) {
11171 			int	do_flush = 0;
11172 
11173 			if (csend->c_inprogress || crecv->c_inprogress)
11174 				do_flush++;
11175 
11176 			if (do_flush) {
11177 				rw_enter(&idn.struprwlock, RW_READER);
11178 				if ((sip = IDN_INST2SIP(channel)) != NULL) {
11179 					/*
11180 					 * Temporarily turn off the STREAM
11181 					 * to give a chance to breath.
11182 					 */
11183 					if (sip->si_flags & IDNRUNNING) {
11184 						is_running = 1;
11185 						sip->si_flags &= ~IDNRUNNING;
11186 					}
11187 				}
11188 				rw_exit(&idn.struprwlock);
11189 			}
11190 
11191 			/*
11192 			 * If we have any senders in-progress
11193 			 * it's possible they're stuck waiting
11194 			 * down in smr_buf_alloc which may never
11195 			 * arrive if we're in an unlink process.
11196 			 * Rather than wait for it to timeout
11197 			 * let's be proactive so we can disconnect
11198 			 * asap.
11199 			 */
11200 			closed_slabwaiters = csp->ch_reg_domset;
11201 			DOMAINSET_ADD(closed_slabwaiters, idn.localid);
11202 			if (closed_slabwaiters)
11203 				smr_slabwaiter_close(closed_slabwaiters);
11204 
11205 			do {
11206 				/*
11207 				 * It's possible due to a STREAMs
11208 				 * loopback from read queue to write queue
11209 				 * that receiver and sender may be same
11210 				 * thread, i.e. receiver's inprogress
11211 				 * flag will never clear until sender's
11212 				 * inprogress flag clears.  So, we wait
11213 				 * for sender's inprogress first.
11214 				 */
11215 				while (csend->c_inprogress) {
11216 					mutex_exit(&crecv->c_mutex);
11217 					while (csend->c_inprogress) {
11218 						csend->c_waiters++;
11219 						cv_wait(&csend->c_cv,
11220 						    &csend->c_mutex);
11221 						csend->c_waiters--;
11222 					}
11223 					/*
11224 					 * Maintain lock ordering.
11225 					 * Eventually we will catch
11226 					 * him due to the flag settings.
11227 					 */
11228 					mutex_exit(&csend->c_mutex);
11229 					mutex_enter(&crecv->c_mutex);
11230 					mutex_enter(&csend->c_mutex);
11231 				}
11232 				if (crecv->c_inprogress) {
11233 					mutex_exit(&csend->c_mutex);
11234 					while (crecv->c_inprogress) {
11235 						crecv->c_waiters++;
11236 						cv_wait(&crecv->c_cv,
11237 						    &crecv->c_mutex);
11238 						crecv->c_waiters--;
11239 					}
11240 					mutex_enter(&csend->c_mutex);
11241 				}
11242 			} while (csend->c_inprogress);
11243 		}
11244 
11245 		if (is_running) {
11246 			/*
11247 			 * Restore the IDNRUNNING bit in
11248 			 * the flags to let them know the
11249 			 * channel is still alive.
11250 			 */
11251 			rw_enter(&idn.struprwlock, RW_READER);
11252 			if ((sip = IDN_INST2SIP(channel)) != NULL)
11253 				sip->si_flags |= IDNRUNNING;
11254 			rw_exit(&idn.struprwlock);
11255 		}
11256 
11257 		if (closed_slabwaiters) {
11258 			/*
11259 			 * We can reopen now since at this point no new
11260 			 * slabwaiters will attempt to come in and wait.
11261 			 */
11262 			smr_slabwaiter_open(csp->ch_reg_domset);
11263 		}
11264 
11265 		crecv->c_checkin = 0;
11266 		csend->c_checkin = 0;
11267 
11268 		/*
11269 		 * ALL leave with locks held.
11270 		 */
11271 		PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
11272 		    proc, chanaction_str[(int)chanaction], channel);
11273 		break;
11274 
11275 	case IDNCHAN_ACTION_ATTACH:
11276 		mutex_enter(&crecv->c_mutex);
11277 		mutex_enter(&csend->c_mutex);
11278 		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ATTACHED;
11279 		/*FALLTHROUGH*/
11280 
11281 	case IDNCHAN_ACTION_RESTART:
11282 		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ENABLED;
11283 		/*FALLTHROUGH*/
11284 
11285 	case IDNCHAN_ACTION_RESUME:
11286 		ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
11287 		set_state |= csp->ch_state & IDN_CHANSVC_STATE_ACTIVE;
11288 
11289 		crecv->c_state |= set_state;
11290 		csend->c_state |= set_state;
11291 
11292 		/*
11293 		 * The channel server itself could come through this
11294 		 * flow, so obviously no point in attempting to wake
11295 		 * ourself up!.
11296 		 */
11297 		if (csp->ch_recv_threadp && (csp->ch_recv_threadp != curthread))
11298 			cv_signal(&csp->ch_recv_cv);
11299 
11300 		PR_CHAN("%s: action (%s) for channel %d - COMPLETED\n",
11301 		    proc, chanaction_str[(int)chanaction], channel);
11302 
11303 		/*
11304 		 * Leaves with lock released.
11305 		 */
11306 		mutex_exit(&csend->c_mutex);
11307 		mutex_exit(&crecv->c_mutex);
11308 		break;
11309 
11310 	default:
11311 		ASSERT(0);
11312 		break;
11313 	}
11314 }
11315 
11316 static void
idn_chan_addmbox(int channel,ushort_t domset)11317 idn_chan_addmbox(int channel, ushort_t domset)
11318 {
11319 	idn_chansvr_t	*csp;
11320 	register int	d;
11321 	procname_t	proc = "idn_chan_addmbox";
11322 
11323 	PR_CHAN("%s: adding domset 0x%x main mailboxes to channel %d\n",
11324 	    proc, domset, channel);
11325 
11326 	ASSERT(idn.chan_servers);
11327 
11328 	csp = &idn.chan_servers[channel];
11329 
11330 	/*
11331 	 * Adding domains to a channel can be
11332 	 * asynchonous, so we don't bother waiting.
11333 	 */
11334 	IDN_CHANNEL_SUSPEND(channel, 0);
11335 
11336 	/*
11337 	 * Now we have the sending and receiving sides blocked
11338 	 * for this channel.
11339 	 */
11340 	for (d = 0; d < MAX_DOMAINS; d++) {
11341 		if (!DOMAIN_IN_SET(domset, d))
11342 			continue;
11343 		if (IDN_CHAN_DOMAIN_IS_REGISTERED(csp, d)) {
11344 			DOMAINSET_DEL(domset, d);
11345 			continue;
11346 		}
11347 		IDN_CHANSVR_SCANSET_ADD_PENDING(csp, d);
11348 		DOMAINSET_ADD(csp->ch_recv_domset_pending, d);
11349 		IDN_CHAN_DOMAIN_REGISTER(csp, d);
11350 
11351 		PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
11352 		    "scanset = 0x%lx\n", proc, d, channel,
11353 		    csp->ch_recv_scanset_pending);
11354 		PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
11355 		    proc, d, channel, (uint_t)csp->ch_reg_domset);
11356 
11357 		CHECKPOINT_OPENED(IDNSB_CHKPT_CHAN,
11358 		    idn_domain[d].dhw.dh_boardset, 1);
11359 	}
11360 	if (domset)
11361 		csp->ch_recv_changed = 1;
11362 
11363 	IDN_CHANNEL_RESUME(channel);
11364 }
11365 
11366 static void
idn_chan_delmbox(int channel,ushort_t domset)11367 idn_chan_delmbox(int channel, ushort_t domset)
11368 {
11369 	idn_chansvr_t	*csp;
11370 	register int	d;
11371 	procname_t	proc = "idn_chan_delmbox";
11372 
11373 	PR_CHAN("%s: deleting domset 0x%x main mailboxes from channel %d\n",
11374 	    proc, domset, channel);
11375 
11376 	ASSERT(idn.chan_servers);
11377 
11378 	csp = &idn.chan_servers[channel];
11379 
11380 	/*
11381 	 * Here we have to wait for the channel server
11382 	 * as it's vital that we don't return without guaranteeing
11383 	 * that the given domset is no longer registered.
11384 	 */
11385 	IDN_CHANNEL_SUSPEND(channel, 1);
11386 
11387 	/*
11388 	 * Now we have the sending and receiving sides blocked
11389 	 * for this channel.
11390 	 */
11391 	for (d = 0; d < MAX_DOMAINS; d++) {
11392 		if (!DOMAIN_IN_SET(domset, d))
11393 			continue;
11394 		if (!IDN_CHAN_DOMAIN_IS_REGISTERED(csp, d)) {
11395 			DOMAINSET_DEL(domset, d);
11396 			continue;
11397 		}
11398 		/*
11399 		 * This domain has a mailbox hanging on this channel.
11400 		 * Get him out.
11401 		 *
11402 		 * First remove him from the receive side.
11403 		 */
11404 		ASSERT(csp->ch_recv_domcount > 0);
11405 		IDN_CHANSVR_SCANSET_DEL_PENDING(csp, d);
11406 		DOMAINSET_DEL(csp->ch_recv_domset_pending, d);
11407 		IDN_CHAN_DOMAIN_UNREGISTER(csp, d);
11408 
11409 		PR_CHAN("%s: domain %d (channel %d) RECV (pending) "
11410 		    "scanset = 0x%lx\n", proc, d, channel,
11411 		    csp->ch_recv_scanset_pending);
11412 		PR_CHAN("%s: domain %d (channel %d) domset = 0x%x\n",
11413 		    proc, d, channel, (uint_t)csp->ch_reg_domset);
11414 
11415 		CHECKPOINT_CLOSED(IDNSB_CHKPT_CHAN,
11416 		    idn_domain[d].dhw.dh_boardset, 2);
11417 
11418 	}
11419 	if (domset)
11420 		csp->ch_recv_changed = 1;
11421 
11422 	IDN_CHANNEL_RESUME(channel);
11423 }
11424 
11425 static int
idn_valid_etherheader(struct ether_header * ehp)11426 idn_valid_etherheader(struct ether_header *ehp)
11427 {
11428 	uchar_t	*eap;
11429 
11430 	eap = &ehp->ether_dhost.ether_addr_octet[0];
11431 
11432 	if ((eap[IDNETHER_ZERO] != 0) && (eap[IDNETHER_ZERO] != 0xff))
11433 		return (0);
11434 
11435 	if ((eap[IDNETHER_COOKIE1] != IDNETHER_COOKIE1_VAL) &&
11436 	    (eap[IDNETHER_COOKIE1] != 0xff))
11437 		return (0);
11438 
11439 	if ((eap[IDNETHER_COOKIE2] != IDNETHER_COOKIE2_VAL) &&
11440 	    (eap[IDNETHER_COOKIE2] != 0xff))
11441 		return (0);
11442 
11443 	if ((eap[IDNETHER_RESERVED] != IDNETHER_RESERVED_VAL) &&
11444 	    (eap[IDNETHER_RESERVED] != 0xff))
11445 		return (0);
11446 
11447 	if (!VALID_UCHANNEL(eap[IDNETHER_CHANNEL]) &&
11448 	    (eap[IDNETHER_CHANNEL] != 0xff))
11449 		return (0);
11450 
11451 	if (!VALID_UDOMAINID(IDN_NETID2DOMID(eap[IDNETHER_NETID])) &&
11452 	    (eap[IDNETHER_NETID] != 0xff))
11453 		return (0);
11454 
11455 	return (1);
11456 }
11457 
11458 /*
11459  * Packet header has already been filled in.
11460  * RETURNS:	0
11461  *		ENOLINK
11462  *		EPROTO
11463  *		ENOSPC
11464  */
11465 /*ARGSUSED*/
11466 static int
idn_send_mboxdata(int domid,struct idn * sip,int channel,caddr_t bufp)11467 idn_send_mboxdata(int domid, struct idn *sip, int channel, caddr_t bufp)
11468 {
11469 	idn_mainmbox_t	*mmp;
11470 	idn_mboxmsg_t	*mqp;
11471 	smr_pkthdr_t	*hdrp;
11472 	smr_offset_t	bufoffset;
11473 	idn_netaddr_t	dst;
11474 	ushort_t		mbox_csum;
11475 	int		rv = 0;
11476 	int		pktlen, qi;
11477 	procname_t	proc = "idn_send_mboxdata";
11478 
11479 	mmp = idn_domain[domid].dmbox.m_send;
11480 	if (mmp == NULL) {
11481 		PR_DATA("%s: dmbox.m_send == NULL\n", proc);
11482 		IDN_KSTAT_INC(sip, si_linkdown);
11483 		return (ENOLINK);
11484 	}
11485 
11486 	mmp += channel;
11487 	mutex_enter(&mmp->mm_mutex);
11488 
11489 	if (mmp->mm_smr_mboxp == NULL) {
11490 		PR_DATA("%s: (d %d, chn %d) mm_smr_mboxp == NULL\n",
11491 		    proc, domid, channel);
11492 		IDN_KSTAT_INC(sip, si_linkdown);
11493 		rv = ENOLINK;
11494 		goto send_err;
11495 	}
11496 	mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
11497 	if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
11498 		PR_DATA("%s: (d %d, chn %d) mbox hdr cksum (%d) "
11499 		    "!= actual (%d)\n",
11500 		    proc, domid, channel, mbox_csum,
11501 		    mmp->mm_smr_mboxp->mt_header.mh_cksum);
11502 		if ((mmp->mm_flags & IDNMMBOX_FLAG_CORRUPTED) == 0) {
11503 			cmn_err(CE_WARN,
11504 			    "IDN: 241: [send] (domain %d, "
11505 			    "channel %d) SMR CORRUPTED - RELINK",
11506 			    domid, channel);
11507 			mmp->mm_flags |= IDNMMBOX_FLAG_CORRUPTED;
11508 		}
11509 		IDN_KSTAT_INC(sip, si_mboxcrc);
11510 		IDN_KSTAT_INC(sip, si_oerrors);
11511 		rv = EPROTO;
11512 		goto send_err;
11513 	}
11514 
11515 	bufoffset = IDN_ADDR2OFFSET(bufp);
11516 	hdrp	  = IDN_BUF2HDR(bufp);
11517 	pktlen    = hdrp->b_length;
11518 	dst.netaddr = hdrp->b_netaddr;
11519 	ASSERT(dst.net.chan == (ushort_t)channel);
11520 
11521 	mqp = &mmp->mm_smr_mboxp->mt_queue[0];
11522 	qi  = mmp->mm_qiput;
11523 
11524 	if (mqp[qi].ms_owner) {
11525 		PR_DATA("%s: mailbox FULL (qiput=%d, qiget=%d)\n",
11526 		    proc, mmp->mm_qiput, mmp->mm_qiget);
11527 		IDN_KSTAT_INC(sip, si_txfull);
11528 		rv = ENOSPC;
11529 		goto send_err;
11530 	}
11531 	if (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) {
11532 		smr_offset_t	recl_bufoffset;
11533 		/*
11534 		 * Remote domain finished with mailbox entry,
11535 		 * however it has not been reclaimed yet.  A reclaim
11536 		 * was done before coming into this routine, however
11537 		 * timing may have been such that the entry became
11538 		 * free just after the reclamation, but before
11539 		 * entry into here.  Go ahead and reclaim this entry.
11540 		 */
11541 		recl_bufoffset = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
11542 
11543 		PR_DATA("%s: attempting reclaim (domain %d) "
11544 		    "(qiput=%d, b_off=0x%x)\n",
11545 		    proc, domid, qi, recl_bufoffset);
11546 
11547 		if (VALID_NWROFFSET(recl_bufoffset, IDN_SMR_BUFSIZE)) {
11548 			int		recl;
11549 			caddr_t		b_bufp;
11550 			smr_pkthdr_t	*b_hdrp;
11551 
11552 			b_bufp = IDN_OFFSET2ADDR(recl_bufoffset);
11553 			b_hdrp = IDN_BUF2HDR(b_bufp);
11554 
11555 			if (IDN_CKSUM_PKT(b_hdrp) != b_hdrp->b_cksum) {
11556 				IDN_KSTAT_INC(sip, si_crc);
11557 				IDN_KSTAT_INC(sip, si_fcs_errors);
11558 				IDN_KSTAT_INC(sip, si_reclaim);
11559 				IDN_KSTAT_INC(sip, si_oerrors);
11560 			}
11561 
11562 			recl = smr_buf_free(domid, b_bufp, b_hdrp->b_length);
11563 #ifdef DEBUG
11564 			if (recl == 0) {
11565 				PR_DATA("%s: SUCCESSFULLY reclaimed buf "
11566 				    "(domain %d)\n", proc, domid);
11567 			} else {
11568 				PR_DATA("%s: WARNING: reclaim failed (FREE) "
11569 				    "(domain %d)\n", proc, domid);
11570 			}
11571 #endif /* DEBUG */
11572 		} else {
11573 			IDN_KSTAT_INC(sip, si_smraddr);
11574 			IDN_KSTAT_INC(sip, si_reclaim);
11575 			PR_DATA("%s: WARNING: reclaim failed (BAD OFFSET) "
11576 			    "(domain %d)\n", proc, domid);
11577 		}
11578 	}
11579 
11580 	if (*mmp->mm_smr_readyp == 0) {
11581 		mmp->mm_qiput = qi;
11582 		IDN_KSTAT_INC(sip, si_linkdown);
11583 		rv = ENOLINK;
11584 		goto send_err;
11585 	}
11586 
11587 	mqp[qi].ms_flag = IDN_MBOXMSG_FLAG_RECLAIM;
11588 	mqp[qi].ms_bframe = IDN_OFFSET2BFRAME(bufoffset);
11589 	/* membar_stst(); */
11590 	mqp[qi].ms_owner = 1;
11591 
11592 	IDN_MMBOXINDEX_INC(qi);
11593 
11594 	mmp->mm_qiput = qi;
11595 
11596 	mmp->mm_count++;
11597 
11598 	if ((*mmp->mm_smr_readyp) && !(*mmp->mm_smr_activep)) {
11599 		idn_msgtype_t	mt;
11600 
11601 		mt.mt_mtype = IDNP_DATA;
11602 		mt.mt_atype = 0;
11603 		IDN_KSTAT_INC(sip, si_xdcall);
11604 		(void) IDNXDC(domid, &mt, (uint_t)dst.net.chan, 0, 0, 0);
11605 	}
11606 	mutex_exit(&mmp->mm_mutex);
11607 	IDN_KSTAT_INC(sip, si_opackets);
11608 	IDN_KSTAT_INC(sip, si_opackets64);
11609 	IDN_KSTAT_ADD(sip, si_xmtbytes, pktlen);
11610 	IDN_KSTAT_ADD(sip, si_obytes64, (uint64_t)pktlen);
11611 
11612 	return (0);
11613 
11614 send_err:
11615 	mmp->mm_dropped++;
11616 
11617 	mutex_exit(&mmp->mm_mutex);
11618 
11619 	return (rv);
11620 }
11621 
11622 static int
idn_recv_mboxdata(int channel,caddr_t bufp)11623 idn_recv_mboxdata(int channel, caddr_t bufp)
11624 {
11625 	smr_pkthdr_t	*hdrp;
11626 	struct idn	*sip;
11627 	mblk_t		*mp = nilp(mblk_t);
11628 	int		pktlen;
11629 	int		apktlen;
11630 	int		rv = 0;
11631 	smr_offset_t	bufoffset;
11632 	ushort_t	csum;
11633 	idn_netaddr_t	dst, daddr;
11634 	procname_t	proc = "idn_recv_mboxdata";
11635 
11636 	hdrp = IDN_BUF2HDR(bufp);
11637 
11638 	csum = IDN_CKSUM_PKT(hdrp);
11639 
11640 	sip = IDN_INST2SIP(channel);
11641 	if (sip == NULL) {
11642 		/*LINTED*/
11643 		sip = IDN_INST2SIP(0);
11644 	}
11645 	ASSERT(sip);
11646 
11647 	if (csum != hdrp->b_cksum) {
11648 		PR_DATA("%s: bad checksum(%x) != expected(%x)\n",
11649 		    proc, (uint_t)csum, (uint_t)hdrp->b_cksum);
11650 		IDN_KSTAT_INC(sip, si_crc);
11651 		IDN_KSTAT_INC(sip, si_fcs_errors);
11652 		rv = -1;
11653 		goto recv_err;
11654 	}
11655 
11656 	daddr.net.chan = (ushort_t)channel;
11657 	daddr.net.netid = (ushort_t)idn.localid;
11658 
11659 	dst.netaddr = hdrp->b_netaddr;
11660 	bufoffset = hdrp->b_offset;
11661 
11662 	if (dst.netaddr != daddr.netaddr) {
11663 		PR_DATA("%s: wrong dest netaddr (0x%x), expected (0x%x)\n",
11664 		    proc, dst.netaddr, daddr.netaddr);
11665 		IDN_KSTAT_INC(sip, si_nolink);
11666 		IDN_KSTAT_INC(sip, si_macrcv_errors);
11667 		goto recv_err;
11668 	}
11669 	pktlen  = hdrp->b_length;
11670 	apktlen = pktlen;
11671 
11672 	if ((pktlen <= 0) || (pktlen > IDN_DATA_SIZE)) {
11673 		PR_DATA("%s: invalid packet length (%d) <= 0 || > %lu\n",
11674 		    proc, pktlen, IDN_DATA_SIZE);
11675 		IDN_KSTAT_INC(sip, si_buff);
11676 		IDN_KSTAT_INC(sip, si_toolong_errors);
11677 		goto recv_err;
11678 	}
11679 
11680 	mp = allocb(apktlen + IDN_ALIGNSIZE, BPRI_LO);
11681 	if (mp == nilp(mblk_t)) {
11682 		PR_DATA("%s: allocb(pkt) failed\n", proc);
11683 		IDN_KSTAT_INC(sip, si_allocbfail);
11684 		IDN_KSTAT_INC(sip, si_norcvbuf);	/* MIB II */
11685 		goto recv_err;
11686 	}
11687 	ASSERT(DB_TYPE(mp) == M_DATA);
11688 	/*
11689 	 * Copy data packet into its streams buffer.
11690 	 * Align pointers for maximum bcopy performance.
11691 	 */
11692 	mp->b_rptr = (uchar_t *)IDN_ALIGNPTR(mp->b_rptr, bufoffset);
11693 	bcopy(IDN_BUF2DATA(bufp, bufoffset), mp->b_rptr, apktlen);
11694 	mp->b_wptr = mp->b_rptr + pktlen;
11695 
11696 	if (IDN_CHECKSUM &&
11697 		!idn_valid_etherheader((struct ether_header *)mp->b_rptr)) {
11698 		freeb(mp);
11699 		mp = nilp(mblk_t);
11700 		PR_DATA("%s: etherheader CORRUPTED\n", proc);
11701 		IDN_KSTAT_INC(sip, si_crc);
11702 		IDN_KSTAT_INC(sip, si_fcs_errors);
11703 		rv = -1;
11704 		goto recv_err;
11705 	}
11706 
11707 	idndl_read(NULL, mp);
11708 
11709 recv_err:
11710 
11711 	if (mp == nilp(mblk_t)) {
11712 		IDN_KSTAT_INC(sip, si_ierrors);
11713 	}
11714 
11715 	return (rv);
11716 }
11717 
11718 /*
11719  * When on shutdown path (idn_active_resources) must call
11720  * idn_mainmbox_flush() _BEFORE_ calling idn_reclaim_mboxdata()
11721  * for any final data.  This is necessary incase the mailboxes
11722  * have been unregistered.  If they have then idn_mainmbox_flush()
11723  * will set mm_smr_mboxp to NULL which prevents us from touching
11724  * poison SMR space.
11725  */
11726 int
idn_reclaim_mboxdata(int domid,int channel,int nbufs)11727 idn_reclaim_mboxdata(int domid, int channel, int nbufs)
11728 {
11729 	idn_mainmbox_t	*mmp;
11730 	idn_mboxmsg_t	*mqp;
11731 	smr_pkthdr_t	*hdrp;
11732 	idn_domain_t	*dp;
11733 	int		qi;
11734 	int		mi;
11735 	int		reclaim_cnt = 0;
11736 	int		free_cnt;
11737 	ushort_t	csum;
11738 	struct idn	*sip;
11739 	smr_offset_t	reclaim_list, curr, prev;
11740 	procname_t	proc = "idn_reclaim_mboxdata";
11741 
11742 
11743 	sip = IDN_INST2SIP(channel);
11744 	if (sip == NULL) {
11745 		/*LINTED*/
11746 		sip = IDN_INST2SIP(0);
11747 	}
11748 	ASSERT(sip);
11749 
11750 	dp = &idn_domain[domid];
11751 
11752 	PR_DATA("%s: requested %d buffers from domain %d\n",
11753 	    proc, nbufs, domid);
11754 
11755 	if (lock_try(&dp->dreclaim_inprogress) == 0) {
11756 		/*
11757 		 * Reclaim is already in progress, don't
11758 		 * bother.
11759 		 */
11760 		PR_DATA("%s: reclaim already in progress\n", proc);
11761 		return (0);
11762 	}
11763 
11764 	if (dp->dmbox.m_send == NULL)
11765 		return (0);
11766 
11767 	reclaim_list = curr = prev = IDN_NIL_SMROFFSET;
11768 
11769 	mi = (int)dp->dreclaim_index;
11770 	do {
11771 		ushort_t	mbox_csum;
11772 
11773 		mmp = &dp->dmbox.m_send[mi];
11774 		/* do-while continues down */
11775 		ASSERT(mmp);
11776 		if (mutex_tryenter(&mmp->mm_mutex) == 0) {
11777 			/*
11778 			 * This channel is busy, move on.
11779 			 */
11780 			IDN_MBOXCHAN_INC(mi);
11781 			continue;
11782 		}
11783 
11784 		if (mmp->mm_smr_mboxp == NULL) {
11785 			PR_DATA("%s: no smr pointer for domid %d, chan %d\n",
11786 			    proc, domid, (int)mmp->mm_channel);
11787 			ASSERT(mmp->mm_qiget == mmp->mm_qiput);
11788 			mutex_exit(&mmp->mm_mutex);
11789 			IDN_MBOXCHAN_INC(mi);
11790 			continue;
11791 		}
11792 		mbox_csum = IDN_CKSUM_MBOX(&mmp->mm_smr_mboxp->mt_header);
11793 		if (mbox_csum != mmp->mm_smr_mboxp->mt_header.mh_cksum) {
11794 			PR_DATA("%s: (d %d, chn %d) mbox hdr "
11795 			    "cksum (%d) != actual (%d)\n",
11796 			    proc, domid, (int)mmp->mm_channel, mbox_csum,
11797 			    mmp->mm_smr_mboxp->mt_header.mh_cksum);
11798 			IDN_KSTAT_INC(sip, si_mboxcrc);
11799 			IDN_KSTAT_INC(sip, si_oerrors);
11800 			mutex_exit(&mmp->mm_mutex);
11801 			IDN_MBOXCHAN_INC(mi);
11802 			continue;
11803 		}
11804 		mqp = &mmp->mm_smr_mboxp->mt_queue[0];
11805 		qi  = mmp->mm_qiget;
11806 
11807 		while (!mqp[qi].ms_owner &&
11808 		    (mqp[qi].ms_flag & IDN_MBOXMSG_FLAG_RECLAIM) &&
11809 		    nbufs) {
11810 			idn_mboxmsg_t	*msp;
11811 			int		badbuf;
11812 
11813 			badbuf = 0;
11814 			msp = &mqp[qi];
11815 
11816 			if (msp->ms_flag & IDN_MBOXMSG_FLAG_ERRMASK) {
11817 				PR_DATA("%s: msg.flag ERROR(0x%x) (off=0x%x, "
11818 				    "domid=%d, qiget=%d)\n", proc,
11819 				    (uint_t)(msp->ms_flag &
11820 				    IDN_MBOXMSG_FLAG_ERRMASK),
11821 				    IDN_BFRAME2OFFSET(msp->ms_bframe),
11822 				    domid, qi);
11823 			}
11824 			prev = curr;
11825 			curr = IDN_BFRAME2OFFSET(mqp[qi].ms_bframe);
11826 
11827 			if (!VALID_NWROFFSET(curr, IDN_SMR_BUFSIZE)) {
11828 				badbuf = 1;
11829 				IDN_KSTAT_INC(sip, si_reclaim);
11830 			} else {
11831 				/*
11832 				 * Put the buffers onto a list that will be
11833 				 * formally reclaimed down below.  This allows
11834 				 * us to free up mboxq entries as fast as
11835 				 * possible.
11836 				 */
11837 				hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(curr));
11838 				csum = IDN_CKSUM_PKT(hdrp);
11839 
11840 				if (csum != hdrp->b_cksum) {
11841 					badbuf = 1;
11842 					IDN_KSTAT_INC(sip, si_crc);
11843 					IDN_KSTAT_INC(sip, si_fcs_errors);
11844 					IDN_KSTAT_INC(sip, si_reclaim);
11845 					if (!(mmp->mm_flags &
11846 					    IDNMMBOX_FLAG_CORRUPTED)) {
11847 						cmn_err(CE_WARN,
11848 						    "IDN: 241: [send] "
11849 						    "(domain %d, channel "
11850 						    "%d) SMR CORRUPTED - "
11851 						    "RELINK",
11852 						    domid, channel);
11853 						mmp->mm_flags |=
11854 						    IDNMMBOX_FLAG_CORRUPTED;
11855 					}
11856 
11857 				} else if (reclaim_list == IDN_NIL_SMROFFSET) {
11858 					reclaim_list = curr;
11859 				} else {
11860 					caddr_t	bufp;
11861 
11862 					bufp = IDN_OFFSET2ADDR(prev);
11863 					hdrp = IDN_BUF2HDR(bufp);
11864 					hdrp->b_next = curr;
11865 				}
11866 			}
11867 
11868 			mqp[qi].ms_flag = 0;
11869 
11870 			IDN_MMBOXINDEX_INC(qi);
11871 
11872 			if (!badbuf) {
11873 				nbufs--;
11874 				reclaim_cnt++;
11875 			}
11876 
11877 			if (qi == mmp->mm_qiget)
11878 				break;
11879 		}
11880 		mmp->mm_qiget = qi;
11881 
11882 		mutex_exit(&mmp->mm_mutex);
11883 
11884 		IDN_MBOXCHAN_INC(mi);
11885 
11886 	} while ((mi != (int)dp->dreclaim_index) && nbufs);
11887 
11888 	dp->dreclaim_index = (uchar_t)mi;
11889 
11890 	if (reclaim_list != IDN_NIL_SMROFFSET) {
11891 		hdrp = IDN_BUF2HDR(IDN_OFFSET2ADDR(curr));
11892 		hdrp->b_next = IDN_NIL_SMROFFSET;
11893 	}
11894 
11895 	PR_DATA("%s: reclaimed %d buffers from domain %d\n",
11896 	    proc, reclaim_cnt, domid);
11897 
11898 	if (reclaim_cnt == 0) {
11899 		lock_clear(&dp->dreclaim_inprogress);
11900 		return (0);
11901 	}
11902 
11903 	/*
11904 	 * Now actually go and reclaim (free) the buffers.
11905 	 */
11906 	free_cnt = 0;
11907 
11908 	for (curr = reclaim_list; curr != IDN_NIL_SMROFFSET; ) {
11909 		caddr_t		bufp;
11910 
11911 		bufp = IDN_OFFSET2ADDR(curr);
11912 		hdrp = IDN_BUF2HDR(bufp);
11913 		csum = IDN_CKSUM_PKT(hdrp);
11914 		if (csum != hdrp->b_cksum) {
11915 			/*
11916 			 * Once corruption is detected we
11917 			 * can't trust our list any further.
11918 			 * These buffers are effectively lost.
11919 			 */
11920 			cmn_err(CE_WARN,
11921 			    "IDN: 241: [send] (domain %d, channel %d) SMR "
11922 			    "CORRUPTED - RELINK", domid, channel);
11923 			break;
11924 		}
11925 
11926 		curr = hdrp->b_next;
11927 
11928 		if (!smr_buf_free(domid, bufp, hdrp->b_length))
11929 			free_cnt++;
11930 	}
11931 
11932 	if ((dp->dio < IDN_WINDOW_EMAX) && dp->diocheck) {
11933 		lock_clear(&dp->diocheck);
11934 		IDN_MSGTIMER_STOP(domid, IDNP_DATA, 0);
11935 	}
11936 
11937 #ifdef DEBUG
11938 	if (free_cnt != reclaim_cnt) {
11939 		PR_DATA("%s: *** WARNING *** freecnt(%d) != reclaim_cnt (%d)\n",
11940 		    proc, free_cnt, reclaim_cnt);
11941 	}
11942 #endif /* DEBUG */
11943 
11944 	lock_clear(&dp->dreclaim_inprogress);
11945 
11946 	return (reclaim_cnt);
11947 }
11948 
11949 void
idn_signal_data_server(int domid,ushort_t channel)11950 idn_signal_data_server(int domid, ushort_t channel)
11951 {
11952 	idn_nack_t	nacktype = 0;
11953 	idn_domain_t	*dp;
11954 	idn_chansvr_t	*csp;
11955 	int		c, min_chan, max_chan;
11956 	idn_mainmbox_t	*mmp;
11957 	procname_t	proc = "idn_signal_data_server";
11958 
11959 
11960 	if (domid == IDN_NIL_DOMID)
11961 		return;
11962 
11963 	dp = &idn_domain[domid];
11964 
11965 	if (dp->dawol.a_count > 0) {
11966 		/*
11967 		 * Domain was previously AWOL, but no longer.
11968 		 */
11969 		IDN_SYNC_LOCK();
11970 		IDN_GLOCK_EXCL();
11971 		idn_clear_awol(domid);
11972 		IDN_GUNLOCK();
11973 		IDN_SYNC_UNLOCK();
11974 	}
11975 	/*
11976 	 * Do a precheck before wasting time trying to acquire the lock.
11977 	 */
11978 	if ((dp->dstate != IDNDS_CONNECTED) || !IDN_DLOCK_TRY_SHARED(domid)) {
11979 		/*
11980 		 * Either we're not connected or somebody is busy working
11981 		 * on the domain.  Bail on the signal for now, we'll catch
11982 		 * it on the next go around.
11983 		 */
11984 		return;
11985 	}
11986 	/*
11987 	 * We didn't have the drwlock on the first check of dstate,
11988 	 * but now that we do, make sure the world hasn't changed!
11989 	 */
11990 	if (dp->dstate != IDNDS_CONNECTED) {
11991 		/*
11992 		 * If we reach here, then no connection.
11993 		 * Send no response if this is the case.
11994 		 */
11995 		nacktype = IDNNACK_NOCONN;
11996 		goto send_dresp;
11997 	}
11998 
11999 	/*
12000 	 * No need to worry about locking mainmbox
12001 	 * because we're already holding reader
12002 	 * lock on domain, plus we're just reading
12003 	 * fields in the mainmbox which only change
12004 	 * (or go away) when the writer lock is
12005 	 * held on the domain.
12006 	 */
12007 	if ((mmp = dp->dmbox.m_recv) == NULL) {
12008 		/*
12009 		 * No local mailbox.
12010 		 */
12011 		nacktype = IDNNACK_BADCFG;
12012 		goto send_dresp;
12013 	}
12014 	if ((channel != IDN_BROADCAST_ALLCHAN) && (channel >= IDN_MAX_NETS)) {
12015 		nacktype = IDNNACK_BADCHAN;
12016 		goto send_dresp;
12017 	}
12018 	if (channel == IDN_BROADCAST_ALLCHAN) {
12019 		PR_DATA("%s: requested signal to ALL channels on domain %d\n",
12020 		    proc, domid);
12021 		min_chan = 0;
12022 		max_chan = IDN_MAX_NETS - 1;
12023 	} else {
12024 		PR_DATA("%s: requested signal to channel %d on domain %d\n",
12025 		    proc, channel, domid);
12026 		min_chan = max_chan = (int)channel;
12027 	}
12028 	mmp += min_chan;
12029 	for (c = min_chan; c <= max_chan; mmp++, c++) {
12030 
12031 		/*
12032 		 * We do a quick check for a pending channel.
12033 		 * If pending it will need activation and we rather
12034 		 * do that through a separate (proto) thread.
12035 		 */
12036 		csp = &idn.chan_servers[c];
12037 
12038 		if (csp->ch_recv.c_checkin) {
12039 			PR_DATA("%s: chansvr (%d) for domid %d CHECK-IN\n",
12040 			    proc, c, domid);
12041 			continue;
12042 		}
12043 
12044 		if (IDN_CHAN_TRYLOCK_RECV(csp) == 0) {
12045 			/*
12046 			 * Failed to grab lock, server must be active.
12047 			 */
12048 			PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
12049 			    proc, c, domid);
12050 			continue;
12051 		}
12052 
12053 		if (IDN_CHANNEL_IS_PENDING(csp)) {
12054 			/*
12055 			 * Lock is pending.  Submit asynchronous
12056 			 * job to activate and move-on.
12057 			 */
12058 			IDN_CHAN_UNLOCK_RECV(csp);
12059 			idn_submit_chanactivate_job(c);
12060 			continue;
12061 		}
12062 
12063 		/*
12064 		 * If he ain't active, we ain't talkin'.
12065 		 */
12066 		if (IDN_CHANNEL_IS_RECV_ACTIVE(csp) == 0) {
12067 			IDN_CHAN_UNLOCK_RECV(csp);
12068 			PR_DATA("%s: chansvr (%d) for domid %d inactive\n",
12069 			    proc, c, domid);
12070 			continue;
12071 		}
12072 
12073 		if (mutex_tryenter(&mmp->mm_mutex) == 0) {
12074 			IDN_CHAN_UNLOCK_RECV(csp);
12075 			continue;
12076 		}
12077 
12078 		if (mmp->mm_csp != csp) {
12079 			/*
12080 			 * Not registered.
12081 			 */
12082 			mutex_exit(&mmp->mm_mutex);
12083 			IDN_CHAN_UNLOCK_RECV(csp);
12084 			continue;
12085 
12086 		}
12087 		if (mmp->mm_smr_mboxp == NULL) {
12088 			/*
12089 			 * No SMR mailbox.
12090 			 */
12091 			mutex_exit(&mmp->mm_mutex);
12092 			IDN_CHAN_UNLOCK_RECV(csp);
12093 			continue;
12094 		}
12095 		mutex_exit(&mmp->mm_mutex);
12096 
12097 		if (csp->ch_recv.c_inprogress) {
12098 			/*
12099 			 * Data server is already active.
12100 			 */
12101 			IDN_CHAN_UNLOCK_RECV(csp);
12102 			PR_DATA("%s: chansvr (%d) for domid %d already actv\n",
12103 			    proc, c, domid);
12104 			continue;
12105 		}
12106 		ASSERT(csp == &idn.chan_servers[c]);
12107 
12108 
12109 		PR_DATA("%s: signaling data dispatcher for chan %d dom %d\n",
12110 		    proc, c, domid);
12111 		ASSERT(csp);
12112 		cv_signal(&csp->ch_recv_cv);
12113 		IDN_CHAN_UNLOCK_RECV(csp);
12114 	}
12115 
12116 	if (!nacktype || (channel == IDN_BROADCAST_ALLCHAN)) {
12117 		/*
12118 		 * If there were no real errors or we were
12119 		 * handling multiple channels, then just
12120 		 * return.
12121 		 */
12122 		IDN_DUNLOCK(domid);
12123 		return;
12124 	}
12125 
12126 send_dresp:
12127 
12128 	PR_DATA("%s: sending NACK (%s) back to domain %d (cpu %d)\n",
12129 	    proc, idnnack_str[nacktype], domid, idn_domain[domid].dcpu);
12130 
12131 	idn_send_dataresp(domid, nacktype);
12132 
12133 	IDN_DUNLOCK(domid);
12134 }
12135 
12136 /*ARGSUSED*/
12137 static int
idn_recv_data(int domid,idn_msgtype_t * mtp,idn_xdcargs_t xargs)12138 idn_recv_data(int domid, idn_msgtype_t *mtp, idn_xdcargs_t xargs)
12139 {
12140 #ifdef DEBUG
12141 	uint_t		msg = mtp ? mtp->mt_mtype : 0;
12142 	uint_t		msgarg = mtp ? mtp->mt_atype : 0;
12143 	procname_t	proc = "idn_recv_data";
12144 
12145 	PR_PROTO("%s:%d: DATA message received (msg = 0x%x, msgarg = 0x%x)\n",
12146 	    proc, domid, msg, msgarg);
12147 	PR_PROTO("%s:%d: xargs = (0x%x, 0x%x, 0x%x, 0x%x)\n",
12148 	    proc, domid, xargs[0], xargs[1], xargs[2], xargs[3]);
12149 #endif /* DEBUG */
12150 
12151 	return (0);
12152 }
12153 
12154 /*
12155  * Only used when sending a negative response.
12156  */
12157 static void
idn_send_dataresp(int domid,idn_nack_t nacktype)12158 idn_send_dataresp(int domid, idn_nack_t nacktype)
12159 {
12160 	idn_msgtype_t	mt;
12161 
12162 	ASSERT(IDN_DLOCK_IS_HELD(domid));
12163 
12164 	if (idn_domain[domid].dcpu == IDN_NIL_DCPU)
12165 		return;
12166 
12167 	mt.mt_mtype = IDNP_NACK;
12168 	mt.mt_atype = IDNP_DATA;
12169 
12170 	(void) IDNXDC(domid, &mt, (uint_t)nacktype, 0, 0, 0);
12171 }
12172 
12173 /*
12174  * Checksum routine used in checksum smr_pkthdr_t and idn_mboxhdr_t.
12175  */
12176 static ushort_t
idn_cksum(register ushort_t * hdrp,register int count)12177 idn_cksum(register ushort_t *hdrp, register int count)
12178 {
12179 	register int		i;
12180 	register ushort_t	sum = 0;
12181 
12182 	for (i = 0; i < count; i++)
12183 		sum += hdrp[i];
12184 
12185 	sum = (sum >> 16) + (sum & 0xffff);
12186 	sum += (sum >> 16);
12187 
12188 	return (~sum);
12189 }
12190 
12191 /*
12192  * ------------------------------------------------
12193  */
12194 
12195 int
idn_open_channel(int channel)12196 idn_open_channel(int channel)
12197 {
12198 	int		masterid;
12199 	idn_chansvr_t	*csp;
12200 	struct idn	*sip;
12201 	procname_t	proc = "idn_open_channel";
12202 
12203 	if (channel >= IDN_MAX_NETS) {
12204 		cmn_err(CE_WARN,
12205 		    "IDN: 242: maximum channels (%d) already open",
12206 		    IDN_MAX_NETS);
12207 		return (-1);
12208 	}
12209 	IDN_GLOCK_EXCL();
12210 
12211 	ASSERT(idn.chan_servers != NULL);
12212 
12213 	csp = &idn.chan_servers[channel];
12214 
12215 	IDN_CHAN_LOCK_GLOBAL(csp);
12216 
12217 	if (IDN_CHANNEL_IS_ATTACHED(csp)) {
12218 		PR_CHAN("%s: channel %d already open\n", proc, channel);
12219 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12220 		IDN_GUNLOCK();
12221 		return (0);
12222 	}
12223 
12224 	/*
12225 	 * Need to zero out the kstats now that we're activating
12226 	 * this channel.
12227 	 */
12228 	for (sip = idn.sip; sip; sip = sip->si_nextp) {
12229 		if (sip->si_dip && (ddi_get_instance(sip->si_dip) == channel)) {
12230 			bzero(&sip->si_kstat, sizeof (sip->si_kstat));
12231 			break;
12232 		}
12233 	}
12234 
12235 	IDN_CHANSVC_MARK_ATTACHED(csp);
12236 	idn.nchannels++;
12237 	CHANSET_ADD(idn.chanset, channel);
12238 	IDN_CHANNEL_ATTACH(channel);
12239 
12240 	IDN_CHAN_UNLOCK_GLOBAL(csp);
12241 
12242 	/*
12243 	 * We increase our window threshold each time a channel
12244 	 * is opened.
12245 	 */
12246 	ASSERT(idn.nchannels > 0);
12247 	IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
12248 	    ((idn.nchannels - 1) * IDN_WINDOW_INCR);
12249 
12250 	PR_CHAN("%s: channel %d is OPEN (nchannels = %d)\n",
12251 	    proc, channel, idn.nchannels);
12252 
12253 	masterid = IDN_GET_MASTERID();
12254 	IDN_GUNLOCK();
12255 
12256 	/*
12257 	 * Check if there is an active master to which
12258 	 * we're connected.  If so, then activate channel.
12259 	 */
12260 	if (masterid != IDN_NIL_DOMID) {
12261 		idn_domain_t	*dp;
12262 
12263 		dp = &idn_domain[masterid];
12264 		IDN_DLOCK_SHARED(masterid);
12265 		if (dp->dvote.v.master && (dp->dstate == IDNDS_CONNECTED))
12266 			(void) idn_activate_channel(CHANSET(channel),
12267 			    IDNCHAN_ONLINE);
12268 		IDN_DUNLOCK(masterid);
12269 	}
12270 
12271 	return (0);
12272 }
12273 
12274 void
idn_close_channel(int channel,idn_chanop_t chanop)12275 idn_close_channel(int channel, idn_chanop_t chanop)
12276 {
12277 	idn_chansvr_t	*csp;
12278 	procname_t	proc = "idn_close_channel";
12279 
12280 
12281 	ASSERT(idn.chan_servers != NULL);
12282 
12283 	csp = &idn.chan_servers[channel];
12284 
12285 	IDN_GLOCK_EXCL();
12286 
12287 	IDN_CHAN_LOCK_GLOBAL(csp);
12288 	if (IDN_CHANNEL_IS_DETACHED(csp)) {
12289 		PR_CHAN("%s: channel %d already closed\n", proc, channel);
12290 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12291 		IDN_GUNLOCK();
12292 		return;
12293 	}
12294 	IDN_CHAN_UNLOCK_GLOBAL(csp);
12295 
12296 	idn_deactivate_channel(CHANSET(channel), chanop);
12297 
12298 	IDN_CHAN_LOCK_GLOBAL(csp);
12299 
12300 	if (chanop == IDNCHAN_HARD_CLOSE) {
12301 		idn.nchannels--;
12302 		CHANSET_DEL(idn.chanset, channel);
12303 		/*
12304 		 * We increase our window threshold each time a channel
12305 		 * is opened.
12306 		 */
12307 		if (idn.nchannels <= 0)
12308 			IDN_WINDOW_EMAX = 0;
12309 		else
12310 			IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
12311 			    ((idn.nchannels - 1) * IDN_WINDOW_INCR);
12312 	}
12313 
12314 	PR_CHAN("%s: channel %d is (%s) CLOSED (nchannels = %d)\n",
12315 	    proc, channel,
12316 	    (chanop == IDNCHAN_SOFT_CLOSE) ? "SOFT"
12317 	    : (chanop == IDNCHAN_HARD_CLOSE) ? "HARD" : "OFFLINE",
12318 	    idn.nchannels);
12319 
12320 	IDN_CHAN_UNLOCK_GLOBAL(csp);
12321 	IDN_GUNLOCK();
12322 }
12323 
12324 static int
idn_activate_channel(idn_chanset_t chanset,idn_chanop_t chanop)12325 idn_activate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
12326 {
12327 	int		c, rv = 0;
12328 	procname_t	proc = "idn_activate_channel";
12329 
12330 	PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
12331 	    proc, chanset, chanop_str[chanop]);
12332 
12333 	if (idn.state != IDNGS_ONLINE) {
12334 		/*
12335 		 * Can't activate any channels unless local
12336 		 * domain is connected and thus has a master.
12337 		 */
12338 		PR_CHAN("%s: local domain not connected.  no data servers\n",
12339 		    proc);
12340 		return (-1);
12341 	}
12342 
12343 	for (c = 0; c < IDN_MAX_NETS; c++) {
12344 		idn_chansvr_t	*csp;
12345 		idn_mboxhdr_t	*mainhp;
12346 		struct idn	*sip;
12347 
12348 		if (!CHAN_IN_SET(chanset, c))
12349 			continue;
12350 		csp = &idn.chan_servers[c];
12351 
12352 		if (chanop == IDNCHAN_ONLINE) {
12353 			IDN_CHAN_LOCK_GLOBAL(csp);
12354 		} else {
12355 			/*
12356 			 * We don't wait to grab the global lock
12357 			 * if IDNCHAN_OPEN since these occur along
12358 			 * critical data paths and will be retried
12359 			 * anyway if needed.
12360 			 */
12361 			if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
12362 				PR_CHAN("%s: failed to acquire global "
12363 				    "lock for channel %d\n",
12364 				    proc, c);
12365 				continue;
12366 			}
12367 		}
12368 
12369 		if (!IDN_CHANNEL_IS_ATTACHED(csp)) {
12370 			PR_CHAN("%s: channel %d NOT open\n", proc, c);
12371 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12372 			continue;
12373 
12374 		}
12375 
12376 		if (IDN_CHANNEL_IS_ACTIVE(csp)) {
12377 
12378 			PR_CHAN("%s: channel %d already active\n", proc, c);
12379 			rv++;
12380 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12381 			continue;
12382 
12383 		}
12384 		/*
12385 		 * Channel activation can happen asynchronously.
12386 		 */
12387 		IDN_CHANNEL_SUSPEND(c, 0);
12388 
12389 		if (IDN_CHANNEL_IS_PENDING(csp) && (chanop == IDNCHAN_OPEN)) {
12390 
12391 			PR_CHAN("%s: ACTIVATING channel %d\n", proc, c);
12392 
12393 			if (idn_activate_channel_services(c) >= 0) {
12394 				PR_CHAN("%s: Setting channel %d ACTIVE\n",
12395 				    proc, c);
12396 				IDN_CHANSVC_MARK_ACTIVE(csp);
12397 				rv++;
12398 			}
12399 		} else if (!IDN_CHANNEL_IS_PENDING(csp) &&
12400 		    (chanop == IDNCHAN_ONLINE)) {
12401 			PR_CHAN("%s: Setting channel %d PENDING\n", proc, c);
12402 
12403 			IDN_CHANSVC_MARK_PENDING(csp);
12404 		}
12405 		/*
12406 		 * Don't syncheader (i.e. touch SMR) unless
12407 		 * channel is at least ENABLED.  For a DISABLED
12408 		 * channel, the SMR may be invalid so do NOT
12409 		 * touch it.
12410 		 */
12411 		if (IDN_CHANNEL_IS_ENABLED(csp) &&
12412 		    ((mainhp = idn_chan_server_syncheader(c)) != NULL)) {
12413 			PR_CHAN("%s: marking chansvr (mhp=0x%p) %d READY\n",
12414 			    proc, (void *)mainhp, c);
12415 			mainhp->mh_svr_ready = 1;
12416 		}
12417 
12418 		IDN_CHANNEL_RESUME(c);
12419 		sip = IDN_INST2SIP(c);
12420 		ASSERT(sip);
12421 		if (sip->si_wantw) {
12422 			mutex_enter(&idn.sipwenlock);
12423 			idndl_wenable(sip);
12424 			mutex_exit(&idn.sipwenlock);
12425 		}
12426 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12427 
12428 	}
12429 	/*
12430 	 * Returns "not active", i.e. value of 0 indicates
12431 	 * no channels are activated.
12432 	 */
12433 	return (rv == 0);
12434 }
12435 
12436 static void
idn_deactivate_channel(idn_chanset_t chanset,idn_chanop_t chanop)12437 idn_deactivate_channel(idn_chanset_t chanset, idn_chanop_t chanop)
12438 {
12439 	int		c;
12440 	procname_t	proc = "idn_deactivate_channel";
12441 
12442 	PR_CHAN("%s: chanset = 0x%x, chanop = %s\n",
12443 	    proc, chanset, chanop_str[chanop]);
12444 
12445 	for (c = 0; c < IDN_MAX_NETS; c++) {
12446 		idn_chansvr_t	*csp;
12447 		idn_mboxhdr_t	*mainhp;
12448 
12449 		if (!CHAN_IN_SET(chanset, c))
12450 			continue;
12451 
12452 		csp = &idn.chan_servers[c];
12453 
12454 		IDN_CHAN_LOCK_GLOBAL(csp);
12455 
12456 		if (((chanop == IDNCHAN_SOFT_CLOSE) &&
12457 		    !IDN_CHANNEL_IS_ACTIVE(csp)) ||
12458 		    ((chanop == IDNCHAN_HARD_CLOSE) &&
12459 		    IDN_CHANNEL_IS_DETACHED(csp)) ||
12460 		    ((chanop == IDNCHAN_OFFLINE) &&
12461 		    !IDN_CHANNEL_IS_ENABLED(csp))) {
12462 
12463 			ASSERT(!IDN_CHANNEL_IS_RECV_ACTIVE(csp));
12464 			ASSERT(!IDN_CHANNEL_IS_SEND_ACTIVE(csp));
12465 
12466 			PR_CHAN("%s: channel %d already deactivated\n",
12467 			    proc, c);
12468 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12469 			continue;
12470 		}
12471 
12472 		switch (chanop) {
12473 		case IDNCHAN_OFFLINE:
12474 			IDN_CHANSVC_MARK_IDLE(csp);
12475 			IDN_CHANSVC_MARK_DISABLED(csp);
12476 			IDN_CHANNEL_STOP(c, 1);
12477 			mainhp = idn_chan_server_syncheader(c);
12478 			if (mainhp != NULL)
12479 				mainhp->mh_svr_ready = 0;
12480 			break;
12481 
12482 		case IDNCHAN_HARD_CLOSE:
12483 			IDN_CHANSVC_MARK_DETACHED(csp);
12484 			IDN_CHANNEL_DETACH(c, 1);
12485 			mainhp = idn_chan_server_syncheader(c);
12486 			if (mainhp != NULL)
12487 				mainhp->mh_svr_ready = 0;
12488 			break;
12489 
12490 		default:
12491 			IDN_CHANSVC_MARK_IDLE(csp);
12492 			IDN_CHANNEL_SUSPEND(c, 1);
12493 			ASSERT(IDN_CHANNEL_IS_ATTACHED(csp));
12494 			break;
12495 		}
12496 
12497 		lock_clear(&csp->ch_actvlck);
12498 		lock_clear(&csp->ch_initlck);
12499 
12500 		PR_CHAN("%s: DEACTIVATING channel %d (%s)\n", proc, c,
12501 		    chanop_str[chanop]);
12502 		PR_CHAN("%s: removing chanset 0x%x data svrs for "
12503 		    "each domain link\n", proc, chanset);
12504 
12505 		(void) idn_deactivate_channel_services(c, chanop);
12506 	}
12507 	/*
12508 	 * Returns with channels unlocked.
12509 	 */
12510 }
12511 
12512 /*
12513  * The priority of the channel server must be less than that
12514  * of the protocol server since the protocol server tasks
12515  * are (can be) of more importance.
12516  *
12517  * Possible range: 60-99.
12518  */
12519 static pri_t	idn_chansvr_pri = (7 * MAXCLSYSPRI) / 8;
12520 
12521 static int
idn_activate_channel_services(int channel)12522 idn_activate_channel_services(int channel)
12523 {
12524 	idn_chansvr_t	*csp;
12525 	procname_t	proc = "idn_activate_channel_services";
12526 
12527 
12528 	ASSERT((channel >= 0) && (channel < IDN_MAX_NETS));
12529 
12530 	csp = &idn.chan_servers[channel];
12531 
12532 	ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
12533 	ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
12534 
12535 	if (csp->ch_recv_threadp) {
12536 		/*
12537 		 * There's an existing dispatcher!
12538 		 * Must have been idle'd during an earlier
12539 		 * stint.
12540 		 */
12541 		ASSERT(csp->ch_id == (uchar_t)channel);
12542 		PR_CHAN("%s: existing chansvr FOUND for (c=%d)\n",
12543 		    proc, channel);
12544 
12545 		if (IDN_CHANNEL_IS_PENDING(csp) == 0)
12546 			return (-1);
12547 
12548 		PR_CHAN("%s: chansvr (c=%d) Rstate = 0x%x, Sstate = 0x%x\n",
12549 		    proc, channel, csp->ch_recv.c_state,
12550 		    csp->ch_send.c_state);
12551 
12552 		cv_signal(&csp->ch_recv_cv);
12553 
12554 		return (0);
12555 	}
12556 
12557 	if (IDN_CHANNEL_IS_PENDING(csp) == 0)
12558 		return (-1);
12559 
12560 	csp->ch_id = (uchar_t)channel;
12561 
12562 	PR_CHAN("%s: init channel %d server\n", proc, channel);
12563 
12564 	csp->ch_recv_morguep = GETSTRUCT(ksema_t, 1);
12565 	sema_init(csp->ch_recv_morguep, 0, NULL, SEMA_DRIVER, NULL);
12566 
12567 	csp->ch_recv.c_inprogress = 0;
12568 	csp->ch_recv.c_waiters = 0;
12569 	csp->ch_recv.c_checkin = 0;
12570 	csp->ch_recv_changed = 1;
12571 
12572 	csp->ch_recv_domset = csp->ch_reg_domset;
12573 
12574 	csp->ch_recv_waittime = IDN_NETSVR_WAIT_MIN;
12575 
12576 	csp->ch_recv_threadp = thread_create(NULL, 0,
12577 	    idn_chan_server, &csp, sizeof (csp), &p0, TS_RUN, idn_chansvr_pri);
12578 
12579 	csp->ch_send.c_inprogress = 0;
12580 	csp->ch_send.c_waiters = 0;
12581 	csp->ch_send.c_checkin = 0;
12582 
12583 	return (0);
12584 }
12585 
12586 /*
12587  * This routine can handle terminating a set of channel
12588  * servers all at once, however currently only used
12589  * for serial killing, i.e. one-at-a-time.
12590  *
12591  * Entered with RECV locks held on chanset.
12592  * Acquires SEND locks if needed.
12593  * Leaves with all RECV and SEND locks dropped.
12594  */
12595 static int
idn_deactivate_channel_services(int channel,idn_chanop_t chanop)12596 idn_deactivate_channel_services(int channel, idn_chanop_t chanop)
12597 {
12598 	idn_chansvr_t	*csp;
12599 	int		cs_count;
12600 	int		c;
12601 	idn_chanset_t	chanset;
12602 	ksema_t		*central_morguep = NULL;
12603 	procname_t	proc = "idn_deactivate_channel_services";
12604 
12605 
12606 	ASSERT(idn.chan_servers);
12607 
12608 	PR_CHAN("%s: deactivating channel %d services\n", proc, channel);
12609 
12610 	/*
12611 	 * XXX
12612 	 * Old code allowed us to deactivate multiple channel
12613 	 * servers at once.  Keep for now just in case.
12614 	 */
12615 	chanset = CHANSET(channel);
12616 
12617 	/*
12618 	 * Point all the data dispatchers to the same morgue
12619 	 * so we can kill them all at once.
12620 	 */
12621 	cs_count = 0;
12622 	for (c = 0; c < IDN_MAX_NETS; c++) {
12623 		if (!CHAN_IN_SET(chanset, c))
12624 			continue;
12625 
12626 		csp = &idn.chan_servers[c];
12627 		ASSERT(IDN_CHAN_GLOBAL_IS_LOCKED(csp));
12628 		ASSERT(IDN_CHAN_LOCAL_IS_LOCKED(csp));
12629 
12630 		if (csp->ch_recv_threadp == NULL) {
12631 			/*
12632 			 * No channel server home.
12633 			 * But we're still holding the c_mutex.
12634 			 * At mark him idle incase we start him up.
12635 			 */
12636 			PR_CHAN("%s: no channel server found for chan %d\n",
12637 			    proc, c);
12638 			IDN_CHAN_UNLOCK_LOCAL(csp);
12639 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12640 			continue;
12641 		}
12642 		ASSERT(csp->ch_id == (uchar_t)c);
12643 
12644 		/*
12645 		 * Okay, now we've blocked the send and receive sides.
12646 		 */
12647 
12648 		if ((chanop == IDNCHAN_SOFT_CLOSE) ||
12649 		    (chanop == IDNCHAN_OFFLINE)) {
12650 			/*
12651 			 * We set turned off the ACTIVE flag, but there's
12652 			 * no guarantee he stopped because of it.  He may
12653 			 * have already been sleeping.  We need to be
12654 			 * sure he recognizes the IDLE, so we need to
12655 			 * signal him and give him a chance to see it.
12656 			 */
12657 			cv_signal(&csp->ch_recv_cv);
12658 			IDN_CHAN_UNLOCK_LOCAL(csp);
12659 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12660 			cs_count++;
12661 			continue;
12662 		}
12663 
12664 		PR_CHAN("%s: pointing chansvr %d to morgue (0x%p)\n",
12665 		    proc, c, central_morguep ? (void *)central_morguep
12666 		    : (void *)(csp->ch_recv_morguep));
12667 
12668 		if (central_morguep == NULL) {
12669 			central_morguep = csp->ch_recv_morguep;
12670 		} else {
12671 			sema_destroy(csp->ch_recv_morguep);
12672 			FREESTRUCT(csp->ch_recv_morguep, ksema_t, 1);
12673 
12674 			csp->ch_recv_morguep = central_morguep;
12675 		}
12676 		cv_signal(&csp->ch_recv_cv);
12677 		if (csp->ch_recv.c_waiters > 0)
12678 			cv_broadcast(&csp->ch_recv.c_cv);
12679 		/*
12680 		 * Save any existing binding for next reincarnation.
12681 		 * Note that we're holding the local and global
12682 		 * locks so we're protected against others touchers
12683 		 * of the ch_bound_cpuid fields.
12684 		 */
12685 		csp->ch_bound_cpuid_pending = csp->ch_bound_cpuid;
12686 		csp->ch_bound_cpuid = -1;
12687 		IDN_CHAN_UNLOCK_LOCAL(csp);
12688 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12689 		cs_count++;
12690 	}
12691 	PR_CHAN("%s: signaled %d chansvrs for chanset 0x%x\n",
12692 	    proc, cs_count, chanset);
12693 
12694 	if ((chanop == IDNCHAN_SOFT_CLOSE) || (chanop == IDNCHAN_OFFLINE))
12695 		return (cs_count);
12696 
12697 	PR_CHAN("%s: waiting for %d (chnset=0x%x) chan svrs to term\n",
12698 	    proc, cs_count, chanset);
12699 	PR_CHAN("%s: morguep = 0x%p\n", proc, (void *)central_morguep);
12700 
12701 	ASSERT((cs_count > 0) ? (central_morguep != NULL) : 1);
12702 	while (cs_count-- > 0)
12703 		sema_p(central_morguep);
12704 
12705 	if (central_morguep) {
12706 		sema_destroy(central_morguep);
12707 		FREESTRUCT(central_morguep, ksema_t, 1);
12708 	}
12709 
12710 	return (cs_count);
12711 }
12712 
12713 int
idn_chanservers_init()12714 idn_chanservers_init()
12715 {
12716 	int		c;
12717 	idn_chansvr_t	*csp;
12718 
12719 
12720 	if (idn.chan_servers)
12721 		return (0);
12722 
12723 	idn.chan_servers = GETSTRUCT(idn_chansvr_t, IDN_MAXMAX_NETS);
12724 
12725 	for (c = 0; c < IDN_MAXMAX_NETS; c++) {
12726 		csp = &idn.chan_servers[c];
12727 		mutex_init(&csp->ch_send.c_mutex, NULL, MUTEX_DEFAULT, NULL);
12728 		mutex_init(&csp->ch_recv.c_mutex, NULL, MUTEX_DEFAULT, NULL);
12729 		cv_init(&csp->ch_send.c_cv, NULL, CV_DRIVER, NULL);
12730 		cv_init(&csp->ch_recv.c_cv, NULL, CV_DRIVER, NULL);
12731 		cv_init(&csp->ch_recv_cv, NULL, CV_DRIVER, NULL);
12732 		csp->ch_bound_cpuid = -1;
12733 		csp->ch_bound_cpuid_pending = -1;
12734 	}
12735 
12736 	return (c);
12737 }
12738 
12739 void
idn_chanservers_deinit()12740 idn_chanservers_deinit()
12741 {
12742 	int		c;
12743 	idn_chansvr_t	*csp;
12744 
12745 
12746 	if (idn.chan_servers == NULL)
12747 		return;
12748 
12749 	for (c = 0; c < IDN_MAXMAX_NETS; c++) {
12750 		csp = &idn.chan_servers[c];
12751 
12752 		mutex_destroy(&csp->ch_send.c_mutex);
12753 		mutex_destroy(&csp->ch_recv.c_mutex);
12754 		cv_destroy(&csp->ch_send.c_cv);
12755 		cv_destroy(&csp->ch_recv.c_cv);
12756 		cv_destroy(&csp->ch_recv_cv);
12757 	}
12758 
12759 	FREESTRUCT(idn.chan_servers, idn_chansvr_t, IDN_MAXMAX_NETS);
12760 	idn.chan_servers = NULL;
12761 }
12762 
12763 static void
idn_exec_chanactivate(void * chn)12764 idn_exec_chanactivate(void *chn)
12765 {
12766 	int		not_active, channel;
12767 	idn_chansvr_t	*csp;
12768 
12769 	channel = (int)(uintptr_t)chn;
12770 
12771 	IDN_GLOCK_SHARED();
12772 	if (idn.chan_servers == NULL) {
12773 		IDN_GUNLOCK();
12774 		return;
12775 	}
12776 	csp = &idn.chan_servers[channel];
12777 
12778 	if (IDN_CHAN_TRYLOCK_GLOBAL(csp) == 0) {
12779 		/*
12780 		 * If we can't grab the global lock, then
12781 		 * something is up, skip out.
12782 		 */
12783 		IDN_GUNLOCK();
12784 		return;
12785 	}
12786 	IDN_GUNLOCK();
12787 
12788 	if (IDN_CHANNEL_IS_PENDING(csp) && lock_try(&csp->ch_actvlck)) {
12789 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12790 		not_active = idn_activate_channel(CHANSET(channel),
12791 		    IDNCHAN_OPEN);
12792 		if (not_active)
12793 			lock_clear(&csp->ch_actvlck);
12794 	} else {
12795 		IDN_CHAN_UNLOCK_GLOBAL(csp);
12796 	}
12797 }
12798 
12799 /*
12800  * Delayed activation of channel.  We don't want to do this within
12801  * idn_signal_data_server() since that's called within the context
12802  * of an XDC handler so we submit it as a timeout() call to be short
12803  * as soon as possible.
12804  * The ch_initlck & ch_actvlck are used to synchronize activation
12805  * of the channel so that we don't have multiple idn_activate_channel's
12806  * attempting to activate the same channel.
12807  */
12808 static void
idn_submit_chanactivate_job(int channel)12809 idn_submit_chanactivate_job(int channel)
12810 {
12811 	idn_chansvr_t	*csp;
12812 
12813 	if (idn.chan_servers == NULL)
12814 		return;
12815 	csp = &idn.chan_servers[channel];
12816 
12817 	if (lock_try(&csp->ch_initlck) == 0)
12818 		return;
12819 
12820 	(void) timeout(idn_exec_chanactivate, (caddr_t)(uintptr_t)channel, 1);
12821 }
12822 
12823 /*ARGSUSED0*/
12824 static void
idn_xmit_monitor(void * unused)12825 idn_xmit_monitor(void *unused)
12826 {
12827 	int		c, d;
12828 	idn_chansvr_t	*csp;
12829 	idn_chanset_t	wake_set;
12830 	domainset_t	conset;
12831 	smr_slab_t	*sp;
12832 	procname_t	proc = "idn_xmit_monitor";
12833 
12834 	CHANSET_ZERO(wake_set);
12835 
12836 	mutex_enter(&idn.xmit_lock);
12837 	if ((idn.xmit_tid == NULL) || !idn.xmit_chanset_wanted) {
12838 		idn.xmit_tid = NULL;
12839 		mutex_exit(&idn.xmit_lock);
12840 		PR_XMON("%s: bailing out\n", proc);
12841 		return;
12842 	}
12843 
12844 	/*
12845 	 * No point in transmitting unless state
12846 	 * is ONLINE.
12847 	 */
12848 	if (idn.state != IDNGS_ONLINE)
12849 		goto retry;
12850 
12851 	conset = idn.domset.ds_connected;
12852 
12853 	/*
12854 	 * Try and reclaim some buffers if possible.
12855 	 */
12856 	for (d = 0; d < MAX_DOMAINS; d++) {
12857 		if (!DOMAIN_IN_SET(conset, d))
12858 			continue;
12859 
12860 		if (!IDN_DLOCK_TRY_SHARED(d))
12861 			continue;
12862 
12863 		if (idn_domain[d].dcpu != IDN_NIL_DCPU)
12864 			(void) idn_reclaim_mboxdata(d, 0, -1);
12865 
12866 		IDN_DUNLOCK(d);
12867 	}
12868 
12869 	/*
12870 	 * Now check if we were successful in getting
12871 	 * any buffers.
12872 	 */
12873 	DSLAB_LOCK_SHARED(idn.localid);
12874 	sp = idn_domain[idn.localid].dslab;
12875 	for (; sp; sp = sp->sl_next)
12876 		if (sp->sl_free)
12877 			break;
12878 	DSLAB_UNLOCK(idn.localid);
12879 
12880 	/*
12881 	 * If there are no buffers available,
12882 	 * no point in reenabling the queues.
12883 	 */
12884 	if (sp == NULL)
12885 		goto retry;
12886 
12887 	CHANSET_ZERO(wake_set);
12888 	for (c = 0; c < IDN_MAX_NETS; c++) {
12889 		int		pending_bits;
12890 		struct idn	*sip;
12891 
12892 		if (!CHAN_IN_SET(idn.xmit_chanset_wanted, c))
12893 			continue;
12894 
12895 		csp = &idn.chan_servers[c];
12896 		if (!IDN_CHAN_TRYLOCK_GLOBAL(csp))
12897 			continue;
12898 
12899 		pending_bits = csp->ch_state & IDN_CHANSVC_PENDING_BITS;
12900 
12901 		sip = IDN_INST2SIP(c);
12902 
12903 		if (!csp->ch_send.c_checkin &&
12904 		    (pending_bits == IDN_CHANSVC_PENDING_BITS) &&
12905 		    sip && (sip->si_flags & IDNRUNNING)) {
12906 
12907 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12908 			CHANSET_ADD(wake_set, c);
12909 
12910 			PR_XMON("%s: QENABLE for channel %d\n", proc, c);
12911 
12912 			rw_enter(&idn.struprwlock, RW_READER);
12913 			mutex_enter(&idn.sipwenlock);
12914 			idndl_wenable(sip);
12915 			mutex_exit(&idn.sipwenlock);
12916 			rw_exit(&idn.struprwlock);
12917 		} else {
12918 			IDN_CHAN_UNLOCK_GLOBAL(csp);
12919 		}
12920 	}
12921 
12922 	/*
12923 	 * Clear the channels we enabled.
12924 	 */
12925 	idn.xmit_chanset_wanted &= ~wake_set;
12926 
12927 retry:
12928 
12929 	if (idn.xmit_chanset_wanted == 0)
12930 		idn.xmit_tid = NULL;
12931 	else
12932 		idn.xmit_tid = timeout(idn_xmit_monitor, NULL,
12933 		    idn_xmit_monitor_freq);
12934 
12935 	mutex_exit(&idn.xmit_lock);
12936 }
12937 
12938 void
idn_xmit_monitor_kickoff(int chan_wanted)12939 idn_xmit_monitor_kickoff(int chan_wanted)
12940 {
12941 	procname_t	proc = "idn_xmit_monitor_kickoff";
12942 
12943 	mutex_enter(&idn.xmit_lock);
12944 
12945 	if (chan_wanted < 0) {
12946 		/*
12947 		 * Wants all channels.
12948 		 */
12949 		idn.xmit_chanset_wanted = CHANSET_ALL;
12950 	} else {
12951 		CHANSET_ADD(idn.xmit_chanset_wanted, chan_wanted);
12952 	}
12953 
12954 	if (idn.xmit_tid != (timeout_id_t)NULL) {
12955 		/*
12956 		 * A monitor is already running, so
12957 		 * he will catch the new "wants" when
12958 		 * he comes around.
12959 		 */
12960 		mutex_exit(&idn.xmit_lock);
12961 		return;
12962 	}
12963 
12964 	PR_XMON("%s: xmit_mon kicked OFF (chanset = 0x%x)\n",
12965 	    proc, idn.xmit_chanset_wanted);
12966 
12967 	idn.xmit_tid = timeout(idn_xmit_monitor, NULL, idn_xmit_monitor_freq);
12968 
12969 	mutex_exit(&idn.xmit_lock);
12970 }
12971