xref: /illumos-gate/usr/src/uts/sun4/os/x_call.c (revision 4983fdb5373239f810c1288128badcb36d079b78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/systm.h>
29 #include <sys/archsystm.h>
30 #include <sys/machsystm.h>
31 #include <sys/cpuvar.h>
32 #include <sys/intreg.h>
33 #include <sys/x_call.h>
34 #include <sys/cmn_err.h>
35 #include <sys/membar.h>
36 #include <sys/disp.h>
37 #include <sys/debug.h>
38 #include <sys/privregs.h>
39 #include <sys/xc_impl.h>
40 #include <sys/ivintr.h>
41 #include <sys/dmv.h>
42 #include <sys/sysmacros.h>
43 
44 #ifdef TRAPTRACE
45 uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
46 uint_t x_rstat[NCPU][4];
47 #endif /* TRAPTRACE */
48 
49 static uint64_t xc_serv_inum;	/* software interrupt number for xc_serv() */
50 static uint64_t xc_loop_inum;	/* software interrupt number for xc_loop() */
51 kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
52 int xc_spl_enter[NCPU];		/* protect sending x-call */
53 static int xc_holder = -1; /* the cpu that initiates xc_attention, 0 is valid */
54 
55 /*
56  * Mail box for handshaking and xcall request; protected by xc_sys_mutex
57  */
58 static struct xc_mbox {
59 	xcfunc_t *xc_func;
60 	uint64_t xc_arg1;
61 	uint64_t xc_arg2;
62 	cpuset_t xc_cpuset;
63 	volatile uint_t	xc_state;
64 } xc_mbox[NCPU];
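
/*
 * A rough sketch of the xc_state transitions driven by the routines
 * below (one mailbox per target cpu):
 *
 *	XC_IDLE  -> XC_DOIT	initiator posts a single request
 *				(xc_one/xc_some/xc_all, serviced by xc_serv)
 *	XC_DOIT  -> XC_IDLE	target finishes the request in xc_serv()
 *	XC_IDLE  -> XC_ENTER	xc_attention() captures the target
 *	XC_ENTER -> XC_WAIT	target parks itself in xc_loop()
 *	XC_WAIT  -> XC_DOIT	the xc_holder posts work to the captured target
 *	XC_DOIT  -> XC_WAIT	target finishes the work and keeps looping
 *	XC_WAIT  -> XC_EXIT	xc_dismissed() releases the target
 *	XC_EXIT  -> XC_IDLE	target leaves xc_loop()
 */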
65 
66 uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
67 uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
68 uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */
69 uint64_t xc_sync_tick_limit;	/* timeout limit for xt_sync() calls */
70 
71 /* timeout value for xcalls to be received by the target CPU */
72 uint64_t xc_mondo_time_limit;
73 
74 /* timeout value for xcall functions to be executed on the target CPU */
75 uint64_t xc_func_time_limit;
76 
77 uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
78 uint64_t xc_mondo_multiplier = 10;
79 
80 uint_t sendmondo_in_recover;
81 
82 /*
83  * sending x-calls
84  */
85 void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
86 void	send_one_mondo(int cpuid);
87 void	send_mondo_set(cpuset_t set);
88 
89 /*
90  * Adjust xc_attention timeout if a faster cpu is dynamically added.
91  * Ignore the dynamic removal of a cpu that would lower these timeout
92  * values.
93  */
94 static int
95 xc_func_timeout_adj(cpu_setup_t what, int cpuid) {
96 	uint64_t freq = cpunodes[cpuid].clock_freq;
97 
98 	switch (what) {
99 	case CPU_ON:
100 	case CPU_INIT:
101 	case CPU_CONFIG:
102 	case CPU_CPUPART_IN:
103 		if (freq * xc_scale > xc_mondo_time_limit) {
104 			xc_mondo_time_limit = freq * xc_scale;
105 			xc_func_time_limit = xc_mondo_time_limit *
106 			    xc_mondo_multiplier;
107 		}
108 		break;
109 	case CPU_OFF:
110 	case CPU_UNCONFIG:
111 	case CPU_CPUPART_OUT:
112 	default:
113 		break;
114 	}
115 
116 	return (0);
117 }
118 
119 /*
120  * xc_init - initialize x-call related locks
121  */
122 void
123 xc_init(void)
124 {
125 	int pix;
126 	uint64_t maxfreq = 0;
127 
128 	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
129 	    (void *)ipltospl(XCALL_PIL));
130 
131 #ifdef TRAPTRACE
132 	/* Initialize for all possible CPUs. */
133 	for (pix = 0; pix < NCPU; pix++) {
134 		XC_STAT_INIT(pix);
135 	}
136 #endif /* TRAPTRACE */
137 
138 	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0,
139 	    SOFTINT_MT);
140 	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0,
141 	    SOFTINT_MT);
142 
143 	/*
144 	 * Initialize the calibrated tick limit for send_mondo.
145 	 * The value represents the maximum tick count to wait.
146 	 */
147 	xc_tick_limit =
148 	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
149 	xc_tick_jump_limit = xc_tick_limit / 32;
150 	xc_tick_limit *= xc_tick_limit_scale;
151 	xc_sync_tick_limit = xc_tick_limit;
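	/*
	 * A rough illustration only: assuming a 1 GHz sys_tick_freq and
	 * an XC_SEND_MONDO_MSEC of 1 (both assumed figures), the base
	 * limit computed above would be 1,000,000,000 * 1 / 1000 =
	 * 1,000,000 tick counts (one millisecond) before the
	 * xc_tick_limit_scale multiplier is applied.
	 */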
152 
153 	/*
154 	 * Maximum number of loops to wait before timing out in xc_attention.
155 	 */
156 	for (pix = 0; pix < NCPU; pix++) {
157 		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
158 	}
159 	xc_mondo_time_limit = maxfreq * xc_scale;
160 	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);
161 
162 	/*
163  * Maximum number of loops to wait for an xcall function to be
164 	 * executed on the target CPU.
165 	 */
166 	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
167 }
168 
169 /*
170  * The following routines provide callers with two kinds of
171  * inter-processor interrupt services:
172  *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
173  *	2. cross traps (x-traps) - requests are handled at target cpu's TL>0
174  *
175  * Although these routines prevent the caller from migrating to another
176  * cpu "after" they are called, it is the caller's responsibility to
177  * prevent cpu migration "before" calling them.
178  *
179  * X-call routines:
180  *
181  *	xc_one()  - send a request to one processor
182  *	xc_some() - send a request to some processors
183  *	xc_all()  - send a request to all processors
184  *
185  *	Their common parameters:
186  *		func - a TL=0 handler address
187  *		arg1 and arg2  - optional
188  *
189  *	The services provided by x-call routines allow callers
190  *	to send a request to target cpus to execute a TL=0
191  *	handler.
192  *	The register interface of the TL=0 handler:
193  *		%o0: arg1
194  *		%o1: arg2
195  *
196  * X-trap routines:
197  *
198  *	xt_one()  - send a request to one processor
199  *	xt_some() - send a request to some processors
200  *	xt_all()  - send a request to all processors
201  *
202  *	Their common parameters:
203  *		func - a TL>0 handler address or an interrupt number
204  *		arg1, arg2
205  *		       optional when "func" is an address;
206  *		       0        when "func" is an interrupt number
207  *
208  *	If "func" is a kernel address, then the target cpu
209  *	will execute "func" with the given args at "TL>0"
210  *	level.
211  *	The register interface of the TL>0 handler:
212  *		%g1: arg1
213  *		%g2: arg2
214  *
215  *	If "func" is not a kernel address, then it has to be an
216  *	interrupt number assigned through add_softintr().
217  *	An interrupt number is an index into the interrupt vector table,
218  *	whose entry contains an interrupt handler address with its
219  *	corresponding interrupt level and argument.
220  *	The target cpu will arrange for the request to be serviced
221  *	according to its pre-registered information.
222  *	The args are assumed to be zero in this case.
223  *
224  * In addition, callers are allowed to capture and release cpus by
225  * calling the routines: xc_attention() and xc_dismissed().
226  */
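
/*
 * Illustrative usage only (a sketch, not code in this file): a caller
 * that wants every ready cpu to run a hypothetical TL=0 handler,
 * my_handler(), could disable preemption to stay on one cpu and then
 * issue the x-call:
 *
 *	static void
 *	my_handler(uint64_t arg1, uint64_t arg2)
 *	{
 *		... runs on each target cpu at TL=0 and XCALL_PIL ...
 *	}
 *
 *	kpreempt_disable();
 *	xc_all((xcfunc_t *)my_handler, arg1, arg2);
 *	kpreempt_enable();
 *
 * A similar sketch for the capture/release pair, xc_attention() and
 * xc_dismissed(), follows xc_dismissed() below.
 */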
227 
228 /*
229  * xt_one - send an "x-trap" to a cpu
230  */
231 void
232 xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
233 {
234 	if (!CPU_IN_SET(cpu_ready_set, cix)) {
235 		return;
236 	}
237 	xt_one_unchecked(cix, func, arg1, arg2);
238 }
239 
240 /*
241  * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
242  * existence in cpu_ready_set
243  */
244 void
245 xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
246 {
247 	int lcx;
248 	int opl;
249 	cpuset_t tset;
250 
251 	/*
252 	 * Make sure the function address will not be interpreted as a
253 	 * dmv interrupt
254 	 */
255 	ASSERT(!DMV_IS_DMV(func));
256 
257 	/*
258 	 * It's illegal to send software inums through the cross-trap
259 	 * interface.
260 	 */
261 	ASSERT((uintptr_t)func >= KERNELBASE);
262 
263 	CPUSET_ZERO(tset);
264 
265 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
266 
267 	CPUSET_ADD(tset, cix);
268 
269 	if (cix == lcx) {
270 		/*
271 		 * same cpu - use software fast trap
272 		 */
273 		send_self_xcall(CPU, arg1, arg2, func);
274 		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
275 		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
276 	} else {
277 		/*
278 		 * other cpu - send a mondo to the target cpu
279 		 */
280 		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
281 		init_mondo(func, arg1, arg2);
282 		send_one_mondo(cix);
283 		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
284 	}
285 	XC_SPL_EXIT(lcx, opl);
286 }
287 
288 /*
289  * xt_some - send an "x-trap" to some cpus
290  */
291 void
292 xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
293 {
294 	int lcx;
295 	int opl;
296 	cpuset_t xc_cpuset, tset;
297 
298 	/*
299 	 * Make sure the function address will not be interpreted as a
300 	 * dmv interrupt
301 	 */
302 	ASSERT(!DMV_IS_DMV(func));
303 
304 	/*
305 	 * It's illegal to send software inums through the cross-trap
306 	 * interface.
307 	 */
308 	ASSERT((uintptr_t)func >= KERNELBASE);
309 
310 	CPUSET_ZERO(tset);
311 
312 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
313 
314 	CPUSET_ADD(tset, lcx);
315 
316 	/*
317 	 * only send to the CPU_READY ones
318 	 */
319 	xc_cpuset = cpu_ready_set;
320 	CPUSET_AND(xc_cpuset, cpuset);
321 
322 	/*
323 	 * send to nobody; just return
324 	 */
325 	if (CPUSET_ISNULL(xc_cpuset)) {
326 		XC_SPL_EXIT(lcx, opl);
327 		return;
328 	}
329 
330 	/*
331 	 * don't send mondo to self
332 	 */
333 	if (CPU_IN_SET(xc_cpuset, lcx)) {
334 		/*
335 		 * same cpu - use software fast trap
336 		 */
337 		send_self_xcall(CPU, arg1, arg2, func);
338 		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
339 		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
340 		CPUSET_DEL(xc_cpuset, lcx);
341 		if (CPUSET_ISNULL(xc_cpuset)) {
342 			XC_SPL_EXIT(lcx, opl);
343 			return;
344 		}
345 	}
346 	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
347 	init_mondo(func, arg1, arg2);
348 	send_mondo_set(xc_cpuset);
349 	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);
350 
351 	XC_SPL_EXIT(lcx, opl);
352 }
353 
354 /*
355  * xt_all - send an "x-trap" to all cpus
356  */
357 void
358 xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
359 {
360 	int lcx;
361 	int opl;
362 	cpuset_t xc_cpuset, tset;
363 
364 	/*
365 	 * Make sure the function address will not be interpreted as a
366 	 * dmv interrupt
367 	 */
368 	ASSERT(!DMV_IS_DMV(func));
369 
370 	/*
371 	 * It's illegal to send software inums through the cross-trap
372 	 * interface.
373 	 */
374 	ASSERT((uintptr_t)func >= KERNELBASE);
375 
376 	CPUSET_ZERO(tset);
377 
378 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
379 
380 	CPUSET_ADD(tset, lcx);
381 
382 	/*
383 	 * same cpu - use software fast trap
384 	 */
385 	if (CPU_IN_SET(cpu_ready_set, lcx))
386 		send_self_xcall(CPU, arg1, arg2, func);
387 
388 	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);
389 
390 	/*
391 	 * don't send mondo to self
392 	 */
393 	xc_cpuset = cpu_ready_set;
394 	CPUSET_DEL(xc_cpuset, lcx);
395 
396 	if (CPUSET_ISNULL(xc_cpuset)) {
397 		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
398 		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
399 		XC_SPL_EXIT(lcx, opl);
400 		return;
401 	}
402 
403 	init_mondo(func, arg1, arg2);
404 	send_mondo_set(xc_cpuset);
405 
406 	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
407 	XC_SPL_EXIT(lcx, opl);
408 }
409 
410 /*
411  * xc_one - send an "x-call" to a cpu
412  */
413 void
414 xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
415 {
416 	int lcx;
417 	int opl;
418 	uint64_t loop_cnt = 0;
419 	cpuset_t tset;
420 	int first_time = 1;
421 
422 	/*
423 	 * send to nobody; just return
424 	 */
425 	if (!CPU_IN_SET(cpu_ready_set, cix))
426 		return;
427 
428 	ASSERT((uintptr_t)func > KERNELBASE);
429 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
430 
431 	CPUSET_ZERO(tset);
432 
433 	kpreempt_disable();
434 
435 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
436 
437 	CPUSET_ADD(tset, cix);
438 
439 	if (cix == lcx) {	/* same cpu just do it */
440 		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
441 		(*func)(arg1, arg2);
442 		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
443 		XC_SPL_EXIT(lcx, opl);
444 		kpreempt_enable();
445 		return;
446 	}
447 
448 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
449 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
450 		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
451 		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
452 		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
453 		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);
454 
455 		/*
456 		 * target processor's xc_loop should be waiting
457 		 * for the work to do; just set up the xc_mbox
458 		 */
459 		XC_SETUP(cix, func, arg1, arg2);
460 		membar_stld();
461 
462 		while (xc_mbox[cix].xc_state != XC_WAIT) {
463 			if (loop_cnt++ > xc_func_time_limit) {
464 				if (sendmondo_in_recover) {
465 					drv_usecwait(1);
466 					loop_cnt = 0;
467 					continue;
468 				}
469 				cmn_err(CE_PANIC, "xc_one() timeout, "
470 				    "xc_state[%d] != XC_WAIT", cix);
471 			}
472 		}
473 		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
474 		XC_SPL_EXIT(lcx, opl);
475 		kpreempt_enable();
476 		return;
477 	}
478 
479 	/*
480 	 * Avoid deadlock if someone has sent us an xc_loop request while
481 	 * we are trying to grab xc_sys_mutex.
482 	 */
483 	XC_SPL_EXIT(lcx, opl);
484 
485 	/*
486 	 * At this point, since we don't own xc_sys_mutex,
487 	 * our pil should be below XCALL_PIL.
488 	 */
489 	ASSERT(getpil() < XCALL_PIL);
490 
491 	/*
492 	 * Since we are not the xc_holder, either no one holds
493 	 * xc_sys_mutex or we have not been told to enter
494 	 * xc_loop(). In either case, we need to grab the
495 	 * xc_sys_mutex before we write to the xc_mbox, and
496 	 * we shouldn't release it until the request is finished.
497 	 */
498 
499 	mutex_enter(&xc_sys_mutex);
500 	xc_spl_enter[lcx] = 1;
501 
502 	/*
503 	 * Since we own xc_sys_mutex now, we are safe to
504 	 * write to the xc_mobx.
505 	 * write to the xc_mbox.
506 	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
507 	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
508 	XC_SETUP(cix, func, arg1, arg2);
509 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
510 	send_one_mondo(cix);
511 
512 	/* xc_serv does membar_stld */
513 	while (xc_mbox[cix].xc_state != XC_IDLE) {
514 		if (loop_cnt++ > xc_func_time_limit) {
515 			if (sendmondo_in_recover) {
516 				drv_usecwait(1);
517 				loop_cnt = 0;
518 				continue;
519 			}
520 			if (first_time) {
521 				XT_SYNC_ONE(cix);
522 				first_time = 0;
523 				loop_cnt = 0;
524 				continue;
525 			}
526 			cmn_err(CE_PANIC, "xc_one() timeout, "
527 			    "xc_state[%d] != XC_IDLE", cix);
528 		}
529 	}
530 	xc_spl_enter[lcx] = 0;
531 	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
532 	mutex_exit(&xc_sys_mutex);
533 
534 	kpreempt_enable();
535 }
536 
537 /*
538  * xc_some - send an "x-call" to some cpus; no mondo is sent to self
539  */
540 void
541 xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
542 {
543 	int lcx;
544 	int opl;
545 	cpuset_t xc_cpuset, tset;
546 
547 	ASSERT((uintptr_t)func > KERNELBASE);
548 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
549 
550 	CPUSET_ZERO(tset);
551 
552 	kpreempt_disable();
553 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
554 
555 	CPUSET_ADD(tset, lcx);
556 
557 	/*
558 	 * only send to the CPU_READY ones
559 	 */
560 	xc_cpuset = cpu_ready_set;
561 	CPUSET_AND(xc_cpuset, cpuset);
562 
563 	/*
564 	 * send to nobody; just return
565 	 */
566 	if (CPUSET_ISNULL(xc_cpuset)) {
567 		XC_SPL_EXIT(lcx, opl);
568 		kpreempt_enable();
569 		return;
570 	}
571 
572 	if (CPU_IN_SET(xc_cpuset, lcx)) {
573 		/*
574 		 * same cpu just do it
575 		 */
576 		(*func)(arg1, arg2);
577 		CPUSET_DEL(xc_cpuset, lcx);
578 		if (CPUSET_ISNULL(xc_cpuset)) {
579 			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
580 			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
581 			XC_SPL_EXIT(lcx, opl);
582 			kpreempt_enable();
583 			return;
584 		}
585 	}
586 
587 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
588 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
589 
590 		CPUSET_AND(mset, cpuset);
591 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
592 		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
593 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
594 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
595 		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
596 		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
597 		XC_SPL_EXIT(lcx, opl);
598 		kpreempt_enable();
599 		return;
600 	}
601 
602 	/*
603 	 * Avoid deadlock if someone has sent us an xc_loop request while
604 	 * we are trying to grab xc_sys_mutex.
605 	 */
606 	XC_SPL_EXIT(lcx, opl);
607 
608 	/*
609 	 * At this point, since we don't own xc_sys_mutex,
610 	 * our pil shouldn't run at or above the XCALL_PIL.
611 	 * our pil should be below XCALL_PIL.
612 	ASSERT(getpil() < XCALL_PIL);
613 
614 	/*
615 	 * grab xc_sys_mutex before writing to the xc_mbox
616 	 */
617 	mutex_enter(&xc_sys_mutex);
618 	xc_spl_enter[lcx] = 1;
619 
620 	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
621 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
622 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
623 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
624 
625 	xc_spl_enter[lcx] = 0;
626 	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
627 	mutex_exit(&xc_sys_mutex);
628 	kpreempt_enable();
629 }
630 
631 /*
632  * xc_all - send an "x-call" to all cpus
633  */
634 void
635 xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
636 {
637 	int lcx;
638 	int opl;
639 	cpuset_t xc_cpuset, tset;
640 
641 	ASSERT((uintptr_t)func > KERNELBASE);
642 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
643 
644 	CPUSET_ZERO(tset);
645 
646 	kpreempt_disable();
647 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
648 
649 	CPUSET_ADD(tset, lcx);
650 
651 	/*
652 	 * same cpu just do it
653 	 */
654 	(*func)(arg1, arg2);
655 	xc_cpuset = cpu_ready_set;
656 	CPUSET_DEL(xc_cpuset, lcx);
657 
658 	if (CPUSET_ISNULL(xc_cpuset)) {
659 		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
660 		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
661 		XC_SPL_EXIT(lcx, opl);
662 		kpreempt_enable();
663 		return;
664 	}
665 
666 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
667 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
668 
669 		CPUSET_AND(mset, xc_cpuset);
670 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
671 		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
672 		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
673 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
674 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
675 		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
676 		XC_SPL_EXIT(lcx, opl);
677 		kpreempt_enable();
678 		return;
679 	}
680 
681 	/*
682 	 * Avoid deadlock if someone has sent us an xc_loop request while
683 	 * we are trying to grab xc_sys_mutex.
684 	 */
685 	XC_SPL_EXIT(lcx, opl);
686 
687 	/*
688 	 * At this point, since we don't own xc_sys_mutex,
689 	 * our pil shouldn't run at or above the XCALL_PIL.
690 	 * our pil should be below XCALL_PIL.
691 	ASSERT(getpil() < XCALL_PIL);
692 
693 	/*
694 	 * grab xc_sys_mutex before writing to the xc_mbox
695 	 */
696 	mutex_enter(&xc_sys_mutex);
697 	xc_spl_enter[lcx] = 1;
698 
699 	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
700 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
701 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
702 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
703 
704 	xc_spl_enter[lcx] = 0;
705 	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
706 	mutex_exit(&xc_sys_mutex);
707 	kpreempt_enable();
708 }
709 
710 /*
711  * xc_attention - paired with xc_dismissed()
712  *
713  * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it;
714  * called when an initiator wants to capture some/all cpus for a critical
715  * session.
716  */
717 void
718 xc_attention(cpuset_t cpuset)
719 {
720 	int pix, lcx;
721 	cpuset_t xc_cpuset, tmpset;
722 	cpuset_t recv_cpuset;
723 	uint64_t loop_cnt = 0;
724 	int first_time = 1;
725 
726 	CPUSET_ZERO(recv_cpuset);
727 
728 	/*
729 	 * don't migrate the cpu until xc_dismissed() is finished
730 	 */
731 	ASSERT(getpil() < XCALL_PIL);
732 	mutex_enter(&xc_sys_mutex);
733 	lcx = (int)(CPU->cpu_id);
734 	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
735 	    x_dstat[lcx][XC_DISMISSED]);
736 	ASSERT(xc_holder == -1);
737 	xc_mbox[lcx].xc_cpuset = cpuset;
738 	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */
739 
740 	/*
741 	 * only send to the CPU_READY ones
742 	 */
743 	xc_cpuset = cpu_ready_set;
744 	CPUSET_AND(xc_cpuset, cpuset);
745 
746 	/*
747 	 * don't send mondo to self
748 	 */
749 	CPUSET_DEL(xc_cpuset, lcx);
750 
751 	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
752 	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);
753 
754 	if (CPUSET_ISNULL(xc_cpuset))
755 		return;
756 
757 	xc_spl_enter[lcx] = 1;
758 	/*
759 	 * inform the target processors to enter into xc_loop()
760 	 */
761 	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
762 	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
763 	xc_spl_enter[lcx] = 0;
764 
765 	/*
766 	 * make sure target processors have entered into xc_loop()
767 	 */
768 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
769 		tmpset = xc_cpuset;
770 		for (pix = 0; pix < NCPU; pix++) {
771 			if (CPU_IN_SET(tmpset, pix)) {
772 				/*
773 				 * membar_stld() is done in xc_loop
774 				 */
775 				if (xc_mbox[pix].xc_state == XC_WAIT) {
776 					CPUSET_ADD(recv_cpuset, pix);
777 				}
778 				CPUSET_DEL(tmpset, pix);
779 				if (CPUSET_ISNULL(tmpset)) {
780 					break;
781 				}
782 			}
783 		}
784 		if (loop_cnt++ > xc_mondo_time_limit) {
785 			if (sendmondo_in_recover) {
786 				drv_usecwait(1);
787 				loop_cnt = 0;
788 				continue;
789 			}
790 			if (first_time) {
791 				XT_SYNC_SOME(xc_cpuset);
792 				first_time = 0;
793 				loop_cnt = 0;
794 				continue;
795 			}
796 			cmn_err(CE_PANIC, "xc_attention() timeout");
797 		}
798 	}
799 
800 	/*
801 	 * xc_sys_mutex remains held until xc_dismissed() is finished
802 	 */
803 }
804 
805 /*
806  * xc_dismissed - paired with xc_attention()
807  *
808  * Called after the critical session is finished.
809  */
810 void
811 xc_dismissed(cpuset_t cpuset)
812 {
813 	int pix;
814 	int lcx = (int)(CPU->cpu_id);
815 	cpuset_t xc_cpuset, tmpset;
816 	cpuset_t recv_cpuset;
817 	uint64_t loop_cnt = 0;
818 
819 	ASSERT(lcx == xc_holder);
820 	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
821 	ASSERT(getpil() >= XCALL_PIL);
822 	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
823 	CPUSET_ZERO(recv_cpuset);
824 	membar_stld();
825 
826 	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
827 	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);
828 
829 	/*
830 	 * only send to the CPU_READY ones
831 	 */
832 	xc_cpuset = cpu_ready_set;
833 	CPUSET_AND(xc_cpuset, cpuset);
834 
835 	/*
836 	 * exclude itself
837 	 */
838 	CPUSET_DEL(xc_cpuset, lcx);
839 	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
840 	if (CPUSET_ISNULL(xc_cpuset)) {
841 		xc_holder = -1;
842 		mutex_exit(&xc_sys_mutex);
843 		return;
844 	}
845 
846 	/*
847 	 * inform other processors to get out of xc_loop()
848 	 */
849 	tmpset = xc_cpuset;
850 	for (pix = 0; pix < NCPU; pix++) {
851 		if (CPU_IN_SET(tmpset, pix)) {
852 			xc_mbox[pix].xc_state = XC_EXIT;
853 			membar_stld();
854 			CPUSET_DEL(tmpset, pix);
855 			if (CPUSET_ISNULL(tmpset)) {
856 				break;
857 			}
858 		}
859 	}
860 
861 	/*
862 	 * make sure target processors have exited from xc_loop()
863 	 */
864 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
865 		tmpset = xc_cpuset;
866 		for (pix = 0; pix < NCPU; pix++) {
867 			if (CPU_IN_SET(tmpset, pix)) {
868 				/*
869 				 * membar_stld() is done in xc_loop
870 				 */
871 				if (xc_mbox[pix].xc_state == XC_IDLE) {
872 					CPUSET_ADD(recv_cpuset, pix);
873 				}
874 				CPUSET_DEL(tmpset, pix);
875 				if (CPUSET_ISNULL(tmpset)) {
876 					break;
877 				}
878 			}
879 		}
880 		if (loop_cnt++ > xc_func_time_limit) {
881 			if (sendmondo_in_recover) {
882 				drv_usecwait(1);
883 				loop_cnt = 0;
884 				continue;
885 			}
886 			cmn_err(CE_PANIC, "xc_dismissed() timeout");
887 		}
888 	}
889 	xc_holder = -1;
890 	mutex_exit(&xc_sys_mutex);
891 }
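
/*
 * An illustrative (hypothetical) use of the capture/release pair: an
 * initiator that needs a set of cpus held quiet while it updates some
 * shared state might do
 *
 *	xc_attention(cset);		captured cpus spin in xc_loop()
 *	... perform the critical update, optionally pushing work to the
 *	captured cpus with xc_one()/xc_some()/xc_all() ...
 *	xc_dismissed(cset);		captured cpus leave xc_loop()
 *
 * where cset is a cpuset_t built by the caller; the same set must be
 * passed to both calls.  xc_attention() acquires the xc_sys_mutex spin
 * lock and xc_dismissed() releases it, so the initiator must not block
 * in between.
 */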
892 
893 /*
894  * xc_serv - "x-call" handler at TL=0; serves only one x-call request.
895  * Runs at XCALL_PIL level.
896  */
897 uint_t
898 xc_serv(void)
899 {
900 	int lcx = (int)(CPU->cpu_id);
901 	struct xc_mbox *xmp;
902 	xcfunc_t *func;
903 	uint64_t arg1, arg2;
904 	cpuset_t tset;
905 
906 	ASSERT(getpil() == XCALL_PIL);
907 	CPUSET_ZERO(tset);
908 	CPUSET_ADD(tset, lcx);
909 	flush_windows();
910 	xmp = &xc_mbox[lcx];
911 	ASSERT(lcx != xc_holder);
912 	ASSERT(xmp->xc_state == XC_DOIT);
913 	func = xmp->xc_func;
914 	XC_TRACE(XC_SERV, &tset, func, xmp->xc_arg1, xmp->xc_arg2);
915 	if (func != NULL) {
916 		arg1 = xmp->xc_arg1;
917 		arg2 = xmp->xc_arg2;
918 		(*func)(arg1, arg2);
919 	}
920 	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
921 	XC_TRACE(XC_SERV, &tset, func, xmp->xc_arg1, xmp->xc_arg2);
922 	xmp->xc_state = XC_IDLE;
923 	membar_stld();
924 	return (1);
925 }
926 
927 /*
928  * if == 1, an xc_loop timeout will cause a panic
929  * otherwise print a warning
930  */
931 uint_t xc_loop_panic = 0;
932 
933 /*
934  * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
935  * session, or serves multiple x-call requests. Runs at XCALL_PIL level.
936  */
937 uint_t
938 xc_loop(void)
939 {
940 	int lcx = (int)(CPU->cpu_id);
941 	struct xc_mbox *xmp;
942 	xcfunc_t *func;
943 	uint64_t arg1, arg2;
944 	uint64_t loop_cnt = 0;
945 	cpuset_t tset;
946 
947 	ASSERT(getpil() == XCALL_PIL);
948 
949 	CPUSET_ZERO(tset);
950 	flush_windows();
951 
952 	/*
953 	 * Someone must hold the xc_sys_mutex;
954 	 * no further interrupt (at XCALL_PIL or below) can
955 	 * be taken by this processor until xc_loop exits.
956 	 *
957 	 * The owner of xc_sys_mutex (or xc_holder) can expect
958 	 * its xc/xt requests are handled as follows:
959 	 * 	xc requests use xc_mbox's handshaking for their services
960 	 * 	xt requests at TL>0 will be handled immediately
961 	 * 	xt requests at TL=0:
962 	 *		if their handlers' pils are <= XCALL_PIL, then
963 	 *			they will be handled after xc_loop exits
964 	 *			(so, they probably should not be used)
965 	 *		else they will be handled immediately
966 	 *
967 	 * For those who are not informed to enter xc_loop, if they
968 	 * send xc/xt requests to this processor at this moment,
969 	 * the requests will be handled as follows:
970 	 *	xc requests will be handled after they grab xc_sys_mutex
971 	 *	xt requests at TL>0 will be handled immediately
972 	 * 	xt requests at TL=0:
973 	 *		if their handlers' pils are <= XCALL_PIL, then
974 	 *			they will be handled after xc_loop exits
975 	 *		else they will be handled immediately
976 	 */
977 	xmp = &xc_mbox[lcx];
978 	ASSERT(lcx != xc_holder);
979 	ASSERT(xmp->xc_state == XC_ENTER);
980 	xmp->xc_state = XC_WAIT;
981 	CPUSET_ADD(tset, lcx);
982 	membar_stld();
983 	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
984 	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
985 	while (xmp->xc_state != XC_EXIT) {
986 		if (xmp->xc_state == XC_DOIT) {
987 			func = xmp->xc_func;
988 			arg1 = xmp->xc_arg1;
989 			arg2 = xmp->xc_arg2;
990 			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
991 			if (func != NULL)
992 				(*func)(arg1, arg2);
993 			xmp->xc_state = XC_WAIT;
994 			membar_stld();
995 			/*
996 			 * reset the timeout counter
997 			 * since some work was done
998 			 */
999 			loop_cnt = 0;
1000 		} else {
1001 			/* patience is a virtue... */
1002 			loop_cnt++;
1003 		}
1004 
1005 		if (loop_cnt > xc_func_time_limit) {
1006 			if (sendmondo_in_recover) {
1007 				drv_usecwait(1);
1008 				loop_cnt = 0;
1009 				continue;
1010 			}
1011 			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
1012 			    "xc_loop() timeout");
1013 			/*
1014 			 * if the above displayed a warning,
1015 			 * reset the timeout counter and be patient
1016 			 */
1017 			loop_cnt = 0;
1018 		}
1019 	}
1020 	ASSERT(xmp->xc_state == XC_EXIT);
1021 	ASSERT(xc_holder != -1);
1022 	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
1023 	xmp->xc_state = XC_IDLE;
1024 	membar_stld();
1025 	return (1);
1026 }
1027