xref: /titanic_50/usr/src/uts/sun4/os/x_call.c (revision d51e90740114c60620c0febffd4d3ce6e280a107)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/systm.h>
29 #include <sys/archsystm.h>
30 #include <sys/machsystm.h>
31 #include <sys/cpuvar.h>
32 #include <sys/intreg.h>
33 #include <sys/x_call.h>
34 #include <sys/cmn_err.h>
35 #include <sys/membar.h>
36 #include <sys/disp.h>
37 #include <sys/debug.h>
38 #include <sys/privregs.h>
39 #include <sys/xc_impl.h>
40 #include <sys/ivintr.h>
41 #include <sys/dmv.h>
42 #include <sys/sysmacros.h>
43 
44 #ifdef TRAPTRACE
45 uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
46 uint_t x_rstat[NCPU][4];
47 #endif /* TRAPTRACE */
48 
49 static uint64_t xc_serv_inum;	/* software interrupt number for xc_serv() */
50 static uint64_t xc_loop_inum;	/* software interrupt number for xc_loop() */
51 kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
52 int xc_spl_enter[NCPU];		/* protect sending x-call */
53 static int xc_holder = -1; /* the cpu that initiated xc_attention; 0 is valid */
54 
55 /*
56  * Mailbox for handshaking and xcall requests; protected by xc_sys_mutex
57  */
58 static struct xc_mbox {
59 	xcfunc_t *xc_func;
60 	uint64_t xc_arg1;
61 	uint64_t xc_arg2;
62 	cpuset_t xc_cpuset;
63 	volatile uint_t	xc_state;
64 } xc_mbox[NCPU];
65 
66 uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
67 uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
68 uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */
69 
70 /* timeout value for xcalls to be received by the target CPU */
71 uint64_t xc_mondo_time_limit;
72 
73 /* timeout value for xcall functions to be executed on the target CPU */
74 uint64_t xc_func_time_limit;
75 
76 uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
77 uint64_t xc_mondo_multiplier = 10;
78 
79 uint_t sendmondo_in_recover;
80 
81 /*
82  * sending x-calls
83  */
84 void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
85 void	send_one_mondo(int cpuid);
86 void	send_mondo_set(cpuset_t set);
87 
88 /*
89  * Adjust xc_attention timeout if a faster cpu is dynamically added.
90  * Ignore the dynamic removal of a cpu that would lower these timeout
91  * values.
92  */
93 static int
94 xc_func_timeout_adj(cpu_setup_t what, int cpuid) {
95 	uint64_t freq = cpunodes[cpuid].clock_freq;
96 
97 	switch (what) {
98 	case CPU_ON:
99 	case CPU_INIT:
100 	case CPU_CONFIG:
101 	case CPU_CPUPART_IN:
102 		if (freq * xc_scale > xc_mondo_time_limit) {
103 			xc_mondo_time_limit = freq * xc_scale;
104 			xc_func_time_limit = xc_mondo_time_limit *
105 			    xc_mondo_multiplier;
106 		}
107 		break;
108 	case CPU_OFF:
109 	case CPU_UNCONFIG:
110 	case CPU_CPUPART_OUT:
111 	default:
112 		break;
113 	}
114 
115 	return (0);
116 }
117 
118 /*
119  * xc_init - initialize x-call related locks, soft interrupts and timeout limits
120  */
121 void
122 xc_init(void)
123 {
124 	int pix;
125 	uint64_t maxfreq = 0;
126 
127 	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
128 	    (void *)ipltospl(XCALL_PIL));
129 
130 #ifdef TRAPTRACE
131 	/* Initialize for all possible CPUs. */
132 	for (pix = 0; pix < NCPU; pix++) {
133 		XC_STAT_INIT(pix);
134 	}
135 #endif /* TRAPTRACE */
136 
137 	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0,
138 	    SOFTINT_MT);
139 	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0,
140 	    SOFTINT_MT);
141 
142 	/*
143 	 * Initialize the calibrated tick limit for send_mondo.
144 	 * The value represents the maximum tick count to wait.
145 	 */
146 	xc_tick_limit =
147 	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
148 	xc_tick_jump_limit = xc_tick_limit / 32;
149 	xc_tick_limit *= xc_tick_limit_scale;
150 
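	/*
	 * Worked example (hypothetical numbers): if sys_tick_freq were
	 * 1,000,000,000 (a 1 GHz tick source) and XC_SEND_MONDO_MSEC were
	 * 1000, xc_tick_limit would start out as (1,000,000,000 * 1000) /
	 * 1000 = 1,000,000,000 ticks (roughly one second worth of ticks),
	 * xc_tick_jump_limit would be 1/32 of that, and the final limit is
	 * then multiplied by xc_tick_limit_scale (1 by default).
	 */
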
151 	/*
152 	 * Maximum number of loops to wait before timing out in xc_attention.
153 	 */
154 	for (pix = 0; pix < NCPU; pix++) {
155 		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
156 	}
157 	xc_mondo_time_limit = maxfreq * xc_scale;
158 	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);
159 
160 	/*
161 	 * Maximum number of loops to wait for an xcall function to be
162 	 * executed on the target CPU.
163 	 */
164 	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
165 }
166 
167 /*
168  * The following routines basically provide callers with two kinds of
169  * inter-processor interrupt services:
170  *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
171  *	2. cross traps (x-traps) - requests are handled at target cpu's TL>0
172  *
173  * Although these routines keep the caller from migrating to another cpu
174  * "after" they are called, it is the caller's responsibility to prevent
175  * cpu migration "before" calling them.
176  *
177  * X-call routines:
178  *
179  *	xc_one()  - send a request to one processor
180  *	xc_some() - send a request to some processors
181  *	xc_all()  - send a request to all processors
182  *
183  *	Their common parameters:
184  *		func - a TL=0 handler address
185  *		arg1 and arg2  - optional
186  *
187  *	The services provided by x-call routines allow callers
188  *	to send a request to target cpus to execute a TL=0
189  *	handler.
190  *	Register interface of the TL=0 handler:
191  *		%o0: arg1
192  *		%o1: arg2
193  *
194  * X-trap routines:
195  *
196  *	xt_one()  - send a request to one processor
197  *	xt_some() - send a request to some processors
198  *	xt_all()  - send a request to all processors
199  *
200  *	Their common parameters:
201  *		func - a TL>0 handler address or an interrupt number
202  *		arg1, arg2
203  *		       optional when "func" is an address;
204  *		       0        when "func" is an interrupt number
205  *
206  *	If "func" is a kernel address, then the target cpu will execute
207  *	"func" with the given args at the "TL>0" level.
208  *
209  *	Register interface of the TL>0 handler:
210  *		%g1: arg1
211  *		%g2: arg2
212  *
213  *	If "func" is not a kernel address, then it has to be an
214  *	interrupt number assigned through add_softintr().
215  *	An interrupt number is an index into the interrupt vector table,
216  *	whose entry contains an interrupt handler address with its
217  *	corresponding interrupt level and argument.
218  *	The target cpu will arrange for the request to be serviced
219  *	according to that pre-registered information.
220  *	The args are assumed to be zeros in this case.
221  *
222  * In addition, callers are allowed to capture and release cpus by
223  * calling the routines: xc_attention() and xc_dismissed().
224  */
225 
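/*
 * Illustrative usage sketch of the x-call/x-trap interfaces described
 * above.  The handler and caller below are hypothetical and are kept out
 * of the build with #if 0; they only show the calling pattern, with the
 * handler passed as an xcfunc_t * (the handler type used throughout this
 * file).
 */
#if 0
static uint_t
xc_example_handler(uint64_t arg1, uint64_t arg2)
{
	/* runs on each target cpu at TL=0, at XCALL_PIL */
	return (0);
}

static void
xc_example_caller(int target_cpu, cpuset_t target_set)
{
	/* keep this thread on its cpu while the requests are issued */
	kpreempt_disable();

	/* run the handler on one ready cpu and wait for it to finish */
	xc_one(target_cpu, (xcfunc_t *)xc_example_handler, 0x1234, 0);

	/* run the handler on every ready cpu in target_set */
	xc_some(target_set, (xcfunc_t *)xc_example_handler, 0, 0);

	kpreempt_enable();
}
#endif
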
226 /*
227  * xt_one - send an "x-trap" to a cpu
228  */
229 void
230 xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
231 {
232 	if (!CPU_IN_SET(cpu_ready_set, cix)) {
233 		return;
234 	}
235 	xt_one_unchecked(cix, func, arg1, arg2);
236 }
237 
238 /*
239  * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
240  * existence in cpu_ready_set
241  */
242 void
243 xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
244 {
245 	int lcx;
246 	int opl;
247 	cpuset_t tset;
248 
249 	/*
250 	 * Make sure the function address will not be interpreted as a
251 	 * dmv interrupt
252 	 */
253 	ASSERT(!DMV_IS_DMV(func));
254 
255 	/*
256 	 * It's illegal to send software inums through the cross-trap
257 	 * interface.
258 	 */
259 	ASSERT((uintptr_t)func >= KERNELBASE);
260 
261 	CPUSET_ZERO(tset);
262 
263 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
264 
265 	CPUSET_ADD(tset, cix);
266 
267 	if (cix == lcx) {
268 		/*
269 		 * same cpu - use software fast trap
270 		 */
271 		send_self_xcall(CPU, arg1, arg2, func);
272 		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
273 		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
274 	} else {
275 		/*
276 		 * other cpu - send a mondo to the target cpu
277 		 */
278 		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
279 		init_mondo(func, arg1, arg2);
280 		send_one_mondo(cix);
281 		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
282 	}
283 	XC_SPL_EXIT(lcx, opl);
284 }
285 
286 /*
287  * xt_some - send an "x-trap" to some cpus
288  */
289 void
290 xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
291 {
292 	int lcx;
293 	int opl;
294 	cpuset_t xc_cpuset, tset;
295 
296 	/*
297 	 * Make sure the function address will not be interpreted as a
298 	 * dmv interrupt
299 	 */
300 	ASSERT(!DMV_IS_DMV(func));
301 
302 	/*
303 	 * It's illegal to send software inums through the cross-trap
304 	 * interface.
305 	 */
306 	ASSERT((uintptr_t)func >= KERNELBASE);
307 
308 	CPUSET_ZERO(tset);
309 
310 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
311 
312 	CPUSET_ADD(tset, lcx);
313 
314 	/*
315 	 * only send to the CPU_READY ones
316 	 */
317 	xc_cpuset = cpu_ready_set;
318 	CPUSET_AND(xc_cpuset, cpuset);
319 
320 	/*
321 	 * send to nobody; just return
322 	 */
323 	if (CPUSET_ISNULL(xc_cpuset)) {
324 		XC_SPL_EXIT(lcx, opl);
325 		return;
326 	}
327 
328 	/*
329 	 * don't send mondo to self
330 	 */
331 	if (CPU_IN_SET(xc_cpuset, lcx)) {
332 		/*
333 		 * same cpu - use software fast trap
334 		 */
335 		send_self_xcall(CPU, arg1, arg2, func);
336 		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
337 		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
338 		CPUSET_DEL(xc_cpuset, lcx);
339 		if (CPUSET_ISNULL(xc_cpuset)) {
340 			XC_SPL_EXIT(lcx, opl);
341 			return;
342 		}
343 	}
344 	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
345 	init_mondo(func, arg1, arg2);
346 	send_mondo_set(xc_cpuset);
347 	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);
348 
349 	XC_SPL_EXIT(lcx, opl);
350 }
351 
352 /*
353  * xt_all - send an "x-trap" to all cpus
354  */
355 void
356 xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
357 {
358 	int lcx;
359 	int opl;
360 	cpuset_t xc_cpuset, tset;
361 
362 	/*
363 	 * Make sure the function address will not be interpreted as a
364 	 * dmv interrupt
365 	 */
366 	ASSERT(!DMV_IS_DMV(func));
367 
368 	/*
369 	 * It's illegal to send software inums through the cross-trap
370 	 * interface.
371 	 */
372 	ASSERT((uintptr_t)func >= KERNELBASE);
373 
374 	CPUSET_ZERO(tset);
375 
376 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
377 
378 	CPUSET_ADD(tset, lcx);
379 
380 	/*
381 	 * same cpu - use software fast trap
382 	 */
383 	if (CPU_IN_SET(cpu_ready_set, lcx))
384 		send_self_xcall(CPU, arg1, arg2, func);
385 
386 	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);
387 
388 	/*
389 	 * don't send mondo to self
390 	 */
391 	xc_cpuset = cpu_ready_set;
392 	CPUSET_DEL(xc_cpuset, lcx);
393 
394 	if (CPUSET_ISNULL(xc_cpuset)) {
395 		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
396 		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
397 		XC_SPL_EXIT(lcx, opl);
398 		return;
399 	}
400 
401 	init_mondo(func, arg1, arg2);
402 	send_mondo_set(xc_cpuset);
403 
404 	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
405 	XC_SPL_EXIT(lcx, opl);
406 }
407 
408 /*
409  * xc_one - send an "x-call" to a cpu
410  */
411 void
412 xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
413 {
414 	int lcx;
415 	int opl;
416 	uint64_t loop_cnt = 0;
417 	cpuset_t tset;
418 	int first_time = 1;
419 
420 	/*
421 	 * target cpu is not ready; just return
422 	 */
423 	if (!CPU_IN_SET(cpu_ready_set, cix))
424 		return;
425 
426 	ASSERT((uintptr_t)func > KERNELBASE);
427 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
428 
429 	CPUSET_ZERO(tset);
430 
431 	kpreempt_disable();
432 
433 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
434 
435 	CPUSET_ADD(tset, cix);
436 
437 	if (cix == lcx) {	/* same cpu just do it */
438 		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
439 		(*func)(arg1, arg2);
440 		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
441 		XC_SPL_EXIT(lcx, opl);
442 		kpreempt_enable();
443 		return;
444 	}
445 
446 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
447 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
448 		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
449 		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
450 		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
451 		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);
452 
453 		/*
454 		 * target processor's xc_loop should be waiting
455 	 * for work to do; just set up the xc_mbox
456 		 */
457 		XC_SETUP(cix, func, arg1, arg2);
458 		membar_stld();
459 
460 		while (xc_mbox[cix].xc_state != XC_WAIT) {
461 			if (loop_cnt++ > xc_func_time_limit) {
462 				if (sendmondo_in_recover) {
463 					drv_usecwait(1);
464 					loop_cnt = 0;
465 					continue;
466 				}
467 				cmn_err(CE_PANIC, "xc_one() timeout, "
468 				    "xc_state[%d] != XC_WAIT", cix);
469 			}
470 		}
471 		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
472 		XC_SPL_EXIT(lcx, opl);
473 		kpreempt_enable();
474 		return;
475 	}
476 
477 	/*
478 	 * Avoid deadlock if someone has sent us an xc_loop request while
479 	 * we are trying to grab xc_sys_mutex.
480 	 */
481 	XC_SPL_EXIT(lcx, opl);
482 
483 	/*
484 	 * At this point, since we don't own xc_sys_mutex,
485 	 * our pil should not be at or above XCALL_PIL.
486 	 */
487 	ASSERT(getpil() < XCALL_PIL);
488 
489 	/*
490 	 * Since we are not the xc_holder, it could be that no one
491 	 * holds the session, or that we were not told to enter
492 	 * xc_loop(). In either case, we need to grab the
493 	 * xc_sys_mutex before we write to the xc_mbox, and
494 	 * we should not release it until the request is finished.
495 	 */
496 
497 	mutex_enter(&xc_sys_mutex);
498 	xc_spl_enter[lcx] = 1;
499 
500 	/*
501 	 * Since we own xc_sys_mutex now, we are safe to
502 	 * write to the xc_mbox.
503 	 */
504 	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
505 	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
506 	XC_SETUP(cix, func, arg1, arg2);
507 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
508 	send_one_mondo(cix);
509 
510 	/* xc_serv does membar_stld */
511 	while (xc_mbox[cix].xc_state != XC_IDLE) {
512 		if (loop_cnt++ > xc_func_time_limit) {
513 			if (sendmondo_in_recover) {
514 				drv_usecwait(1);
515 				loop_cnt = 0;
516 				continue;
517 			}
518 			if (first_time) {
519 				XT_SYNC_ONE(cix);
520 				first_time = 0;
521 				loop_cnt = 0;
522 				continue;
523 			}
524 			cmn_err(CE_PANIC, "xc_one() timeout, "
525 			    "xc_state[%d] != XC_IDLE", cix);
526 		}
527 	}
528 	xc_spl_enter[lcx] = 0;
529 	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
530 	mutex_exit(&xc_sys_mutex);
531 
532 	kpreempt_enable();
533 }
534 
535 /*
536  * xc_some - send an "x-call" to some cpus; sending to self is excluded
537  */
538 void
539 xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
540 {
541 	int lcx;
542 	int opl;
543 	cpuset_t xc_cpuset, tset;
544 
545 	ASSERT((uintptr_t)func > KERNELBASE);
546 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
547 
548 	CPUSET_ZERO(tset);
549 
550 	kpreempt_disable();
551 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
552 
553 	CPUSET_ADD(tset, lcx);
554 
555 	/*
556 	 * only send to the CPU_READY ones
557 	 */
558 	xc_cpuset = cpu_ready_set;
559 	CPUSET_AND(xc_cpuset, cpuset);
560 
561 	/*
562 	 * send to nobody; just return
563 	 */
564 	if (CPUSET_ISNULL(xc_cpuset)) {
565 		XC_SPL_EXIT(lcx, opl);
566 		kpreempt_enable();
567 		return;
568 	}
569 
570 	if (CPU_IN_SET(xc_cpuset, lcx)) {
571 		/*
572 		 * same cpu just do it
573 		 */
574 		(*func)(arg1, arg2);
575 		CPUSET_DEL(xc_cpuset, lcx);
576 		if (CPUSET_ISNULL(xc_cpuset)) {
577 			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
578 			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
579 			XC_SPL_EXIT(lcx, opl);
580 			kpreempt_enable();
581 			return;
582 		}
583 	}
584 
585 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
586 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
587 
588 		CPUSET_AND(mset, cpuset);
589 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
590 		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
591 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
592 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
593 		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
594 		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
595 		XC_SPL_EXIT(lcx, opl);
596 		kpreempt_enable();
597 		return;
598 	}
599 
600 	/*
601 	 * Avoid deadlock if someone has sent us an xc_loop request while
602 	 * we are trying to grab xc_sys_mutex.
603 	 */
604 	XC_SPL_EXIT(lcx, opl);
605 
606 	/*
607 	 * At this point, since we don't own xc_sys_mutex,
608 	 * our pil should not be at or above XCALL_PIL.
609 	 */
610 	ASSERT(getpil() < XCALL_PIL);
611 
612 	/*
613 	 * grab xc_sys_mutex before writing to the xc_mbox
614 	 */
615 	mutex_enter(&xc_sys_mutex);
616 	xc_spl_enter[lcx] = 1;
617 
618 	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
619 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
620 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
621 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
622 
623 	xc_spl_enter[lcx] = 0;
624 	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
625 	mutex_exit(&xc_sys_mutex);
626 	kpreempt_enable();
627 }
628 
629 /*
630  * xc_all - send an "x-call" to all cpus
631  */
632 void
633 xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
634 {
635 	int lcx;
636 	int opl;
637 	cpuset_t xc_cpuset, tset;
638 
639 	ASSERT((uintptr_t)func > KERNELBASE);
640 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
641 
642 	CPUSET_ZERO(tset);
643 
644 	kpreempt_disable();
645 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
646 
647 	CPUSET_ADD(tset, lcx);
648 
649 	/*
650 	 * same cpu just do it
651 	 */
652 	(*func)(arg1, arg2);
653 	xc_cpuset = cpu_ready_set;
654 	CPUSET_DEL(xc_cpuset, lcx);
655 
656 	if (CPUSET_ISNULL(xc_cpuset)) {
657 		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
658 		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
659 		XC_SPL_EXIT(lcx, opl);
660 		kpreempt_enable();
661 		return;
662 	}
663 
664 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
665 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
666 
667 		CPUSET_AND(mset, xc_cpuset);
668 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
669 		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
670 		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
671 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
672 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
673 		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
674 		XC_SPL_EXIT(lcx, opl);
675 		kpreempt_enable();
676 		return;
677 	}
678 
679 	/*
680 	 * Avoid deadlock if someone has sent us an xc_loop request while
681 	 * we are trying to grab xc_sys_mutex.
682 	 */
683 	XC_SPL_EXIT(lcx, opl);
684 
685 	/*
686 	 * At this point, since we don't own xc_sys_mutex,
687 	 * our pil should not be at or above XCALL_PIL.
688 	 */
689 	ASSERT(getpil() < XCALL_PIL);
690 
691 	/*
692 	 * grab xc_sys_mutex before writing to the xc_mbox
693 	 */
694 	mutex_enter(&xc_sys_mutex);
695 	xc_spl_enter[lcx] = 1;
696 
697 	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
698 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
699 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
700 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
701 
702 	xc_spl_enter[lcx] = 0;
703 	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
704 	mutex_exit(&xc_sys_mutex);
705 	kpreempt_enable();
706 }
707 
708 /*
709  * xc_attention - paired with xc_dismissed()
710  *
711  * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it.
712  * Called when an initiator wants to capture some/all cpus for a critical
713  * session.
714  */
715 void
716 xc_attention(cpuset_t cpuset)
717 {
718 	int pix, lcx;
719 	cpuset_t xc_cpuset, tmpset;
720 	cpuset_t recv_cpuset;
721 	uint64_t loop_cnt = 0;
722 	int first_time = 1;
723 
724 	CPUSET_ZERO(recv_cpuset);
725 
726 	/*
727 	 * don't migrate the cpu until xc_dismissed() is finished
728 	 */
729 	ASSERT(getpil() < XCALL_PIL);
730 	mutex_enter(&xc_sys_mutex);
731 	lcx = (int)(CPU->cpu_id);
732 	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
733 	    x_dstat[lcx][XC_DISMISSED]);
734 	ASSERT(xc_holder == -1);
735 	xc_mbox[lcx].xc_cpuset = cpuset;
736 	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */
737 
738 	/*
739 	 * only send to the CPU_READY ones
740 	 */
741 	xc_cpuset = cpu_ready_set;
742 	CPUSET_AND(xc_cpuset, cpuset);
743 
744 	/*
745 	 * don't send mondo to self
746 	 */
747 	CPUSET_DEL(xc_cpuset, lcx);
748 
749 	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
750 	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);
751 
752 	if (CPUSET_ISNULL(xc_cpuset))
753 		return;
754 
755 	xc_spl_enter[lcx] = 1;
756 	/*
757 	 * inform the target processors to enter into xc_loop()
758 	 */
759 	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
760 	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
761 	xc_spl_enter[lcx] = 0;
762 
763 	/*
764 	 * make sure target processors have entered into xc_loop()
765 	 */
766 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
767 		tmpset = xc_cpuset;
768 		for (pix = 0; pix < NCPU; pix++) {
769 			if (CPU_IN_SET(tmpset, pix)) {
770 				/*
771 				 * membar_stld() is done in xc_loop
772 				 */
773 				if (xc_mbox[pix].xc_state == XC_WAIT) {
774 					CPUSET_ADD(recv_cpuset, pix);
775 				}
776 				CPUSET_DEL(tmpset, pix);
777 				if (CPUSET_ISNULL(tmpset)) {
778 					break;
779 				}
780 			}
781 		}
782 		if (loop_cnt++ > xc_mondo_time_limit) {
783 			if (sendmondo_in_recover) {
784 				drv_usecwait(1);
785 				loop_cnt = 0;
786 				continue;
787 			}
788 			if (first_time) {
789 				XT_SYNC_SOME(xc_cpuset);
790 				first_time = 0;
791 				loop_cnt = 0;
792 				continue;
793 			}
794 			cmn_err(CE_PANIC, "xc_attention() timeout");
795 		}
796 	}
797 
798 	/*
799 	 * xc_sys_mutex remains held until xc_dismissed() is finished
800 	 */
801 }
802 
803 /*
804  * xc_dismissed - paired with xc_attention()
805  *
806  * Called after the critical session is finished.
807  */
808 void
809 xc_dismissed(cpuset_t cpuset)
810 {
811 	int pix;
812 	int lcx = (int)(CPU->cpu_id);
813 	cpuset_t xc_cpuset, tmpset;
814 	cpuset_t recv_cpuset;
815 	uint64_t loop_cnt = 0;
816 
817 	ASSERT(lcx == xc_holder);
818 	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
819 	ASSERT(getpil() >= XCALL_PIL);
820 	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
821 	CPUSET_ZERO(recv_cpuset);
822 	membar_stld();
823 
824 	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
825 	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);
826 
827 	/*
828 	 * only send to the CPU_READY ones
829 	 */
830 	xc_cpuset = cpu_ready_set;
831 	CPUSET_AND(xc_cpuset, cpuset);
832 
833 	/*
834 	 * exclude itself
835 	 */
836 	CPUSET_DEL(xc_cpuset, lcx);
837 	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
838 	if (CPUSET_ISNULL(xc_cpuset)) {
839 		xc_holder = -1;
840 		mutex_exit(&xc_sys_mutex);
841 		return;
842 	}
843 
844 	/*
845 	 * inform other processors to get out of xc_loop()
846 	 */
847 	tmpset = xc_cpuset;
848 	for (pix = 0; pix < NCPU; pix++) {
849 		if (CPU_IN_SET(tmpset, pix)) {
850 			xc_mbox[pix].xc_state = XC_EXIT;
851 			membar_stld();
852 			CPUSET_DEL(tmpset, pix);
853 			if (CPUSET_ISNULL(tmpset)) {
854 				break;
855 			}
856 		}
857 	}
858 
859 	/*
860 	 * make sure target processors have exited from xc_loop()
861 	 */
862 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
863 		tmpset = xc_cpuset;
864 		for (pix = 0; pix < NCPU; pix++) {
865 			if (CPU_IN_SET(tmpset, pix)) {
866 				/*
867 				 * membar_stld() is done in xc_loop
868 				 */
869 				if (xc_mbox[pix].xc_state == XC_IDLE) {
870 					CPUSET_ADD(recv_cpuset, pix);
871 				}
872 				CPUSET_DEL(tmpset, pix);
873 				if (CPUSET_ISNULL(tmpset)) {
874 					break;
875 				}
876 			}
877 		}
878 		if (loop_cnt++ > xc_func_time_limit) {
879 			if (sendmondo_in_recover) {
880 				drv_usecwait(1);
881 				loop_cnt = 0;
882 				continue;
883 			}
884 			cmn_err(CE_PANIC, "xc_dismissed() timeout");
885 		}
886 	}
887 	xc_holder = -1;
888 	mutex_exit(&xc_sys_mutex);
889 }
890 
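/*
 * Illustrative sketch of the capture/release pairing above (hypothetical
 * helper, kept out of the build with #if 0): an initiator captures a set
 * of cpus with xc_attention(), issues x-call requests to the captured
 * cpus while they spin in xc_loop(), and then releases them with
 * xc_dismissed().
 */
#if 0
static void
xc_example_critical_session(cpuset_t cpus, xcfunc_t *func, uint64_t arg)
{
	kpreempt_disable();

	/* capture: the targets enter xc_loop() and wait for work */
	xc_attention(cpus);

	/* requests to captured cpus are served via the xc_mbox handshake */
	xc_some(cpus, func, arg, 0);

	/* release: the targets leave xc_loop(); xc_sys_mutex is dropped */
	xc_dismissed(cpus);

	kpreempt_enable();
}
#endif
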
891 /*
892  * xc_serv - "x-call" handler at TL=0; serves only one x-call request.
893  * Runs at XCALL_PIL level.
894  */
895 uint_t
896 xc_serv(void)
897 {
898 	int lcx = (int)(CPU->cpu_id);
899 	struct xc_mbox *xmp;
900 	xcfunc_t *func;
901 	uint64_t arg1, arg2;
902 	cpuset_t tset;
903 
904 	ASSERT(getpil() == XCALL_PIL);
905 	CPUSET_ZERO(tset);
906 	CPUSET_ADD(tset, lcx);
907 	flush_windows();
908 	xmp = &xc_mbox[lcx];
909 	ASSERT(lcx != xc_holder);
910 	ASSERT(xmp->xc_state == XC_DOIT);
911 	func = xmp->xc_func;
912 	/* read args up front so the later XC_TRACE never logs uninitialized values */
913 	arg1 = xmp->xc_arg1;
914 	arg2 = xmp->xc_arg2;
915 	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
916 	if (func != NULL)
917 		(*func)(arg1, arg2);
918 	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
919 	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
920 	xmp->xc_state = XC_IDLE;
921 	membar_stld();
922 	return (1);
923 }
924 
925 /*
926  * if xc_loop_panic == 1, an xc_loop timeout will cause a panic;
927  * otherwise a warning is printed
928  */
929 uint_t xc_loop_panic = 0;
930 
931 /*
932  * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
933  * session, or serves multiple x-call requests. Runs at XCALL_PIL level.
934  */
935 uint_t
936 xc_loop(void)
937 {
938 	int lcx = (int)(CPU->cpu_id);
939 	struct xc_mbox *xmp;
940 	xcfunc_t *func;
941 	uint64_t arg1, arg2;
942 	uint64_t loop_cnt = 0;
943 	cpuset_t tset;
944 
945 	ASSERT(getpil() == XCALL_PIL);
946 
947 	CPUSET_ZERO(tset);
948 	flush_windows();
949 
950 	/*
951 	 * Someone must be holding the xc_sys_mutex;
952 	 * no further interrupt (at XCALL_PIL or below) can
953 	 * be taken by this processor until xc_loop exits.
954 	 *
955 	 * The owner of xc_sys_mutex (or xc_holder) can expect
956 	 * its xc/xt requests are handled as follows:
957 	 * 	xc requests use xc_mbox's handshaking for their services
958 	 * 	xt requests at TL>0 will be handled immediately
959 	 * 	xt requests at TL=0:
960 	 *		if their handlers' pils are <= XCALL_PIL, then
961 	 *			they will be handled after xc_loop exits
962 	 *			(so, they probably should not be used)
963 	 *		else they will be handled immediately
964 	 *
965 	 * For cpus that were not told to enter xc_loop, any xc/xt
966 	 * requests they send to this processor at this moment
967 	 * will be handled as follows:
968 	 *	xc requests will be handled after they grab xc_sys_mutex
969 	 *	xt requests at TL>0 will be handled immediately
970 	 * 	xt requests at TL=0:
971 	 *		if their handlers' pils are <= XCALL_PIL, then
972 	 *			they will be handled after xc_loop exits
973 	 *		else they will be handled immediately
974 	 */
975 	xmp = &xc_mbox[lcx];
976 	ASSERT(lcx != xc_holder);
977 	ASSERT(xmp->xc_state == XC_ENTER);
978 	xmp->xc_state = XC_WAIT;
979 	CPUSET_ADD(tset, lcx);
980 	membar_stld();
981 	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
982 	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
983 	while (xmp->xc_state != XC_EXIT) {
984 		if (xmp->xc_state == XC_DOIT) {
985 			func = xmp->xc_func;
986 			arg1 = xmp->xc_arg1;
987 			arg2 = xmp->xc_arg2;
988 			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
989 			if (func != NULL)
990 				(*func)(arg1, arg2);
991 			xmp->xc_state = XC_WAIT;
992 			membar_stld();
993 			/*
994 			 * reset the timeout counter
995 			 * since some work was done
996 			 */
997 			loop_cnt = 0;
998 		} else {
999 			/* patience is a virtue... */
1000 			loop_cnt++;
1001 		}
1002 
1003 		if (loop_cnt > xc_func_time_limit) {
1004 			if (sendmondo_in_recover) {
1005 				drv_usecwait(1);
1006 				loop_cnt = 0;
1007 				continue;
1008 			}
1009 			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
1010 			    "xc_loop() timeout");
1011 			/*
1012 			 * if the above displayed a warning,
1013 			 * reset the timeout counter and be patient
1014 			 */
1015 			loop_cnt = 0;
1016 		}
1017 	}
1018 	ASSERT(xmp->xc_state == XC_EXIT);
1019 	ASSERT(xc_holder != -1);
1020 	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
1021 	xmp->xc_state = XC_IDLE;
1022 	membar_stld();
1023 	return (1);
1024 }
1025