xref: /illumos-gate/usr/src/uts/sun4/os/x_call.c (revision 12551037071c8ef2216bb540edd94a5bff5e90ae)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/systm.h>
27 #include <sys/archsystm.h>
28 #include <sys/machsystm.h>
29 #include <sys/cpuvar.h>
30 #include <sys/intreg.h>
31 #include <sys/x_call.h>
32 #include <sys/cmn_err.h>
33 #include <sys/membar.h>
34 #include <sys/disp.h>
35 #include <sys/debug.h>
36 #include <sys/privregs.h>
37 #include <sys/xc_impl.h>
38 #include <sys/ivintr.h>
39 #include <sys/dmv.h>
40 #include <sys/sysmacros.h>
41 
42 #ifdef TRAPTRACE
43 uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
44 uint_t x_rstat[NCPU][4];
45 #endif /* TRAPTRACE */
46 
47 static uint64_t xc_serv_inum;	/* software interrupt number for xc_serv() */
48 static uint64_t xc_loop_inum;	/* software interrupt number for xc_loop() */
49 kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
50 int xc_spl_enter[NCPU];		/* protect sending x-call */
51 static int xc_holder = -1; /* the cpu that initiated xc_attention; 0 is valid */
52 
53 /*
54  * Mailbox for handshaking and xcall requests; protected by xc_sys_mutex
55  */
56 static struct xc_mbox {
57 	xcfunc_t *xc_func;
58 	uint64_t xc_arg1;
59 	uint64_t xc_arg2;
60 	cpuset_t xc_cpuset;
61 	volatile uint_t	xc_state;
62 } xc_mbox[NCPU];
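
/*
 * A rough sketch of the xc_state handshake, as driven by the routines below
 * (XC_SETUP fills the mbox and marks it XC_DOIT):
 *
 *	Direct service (xc_serv() path, no captured session):
 *	    XC_IDLE  -> XC_DOIT   initiator fills the mbox, sends a mondo
 *	    XC_DOIT  -> XC_IDLE   target runs the function in xc_serv()
 *
 *	Captured session (xc_attention()/xc_loop()/xc_dismissed() path):
 *	    XC_IDLE  -> XC_ENTER  xc_attention() asks the target to enter xc_loop()
 *	    XC_ENTER -> XC_WAIT   target parks in xc_loop(), ready for requests
 *	    XC_WAIT  -> XC_DOIT   holder posts a request
 *	    XC_DOIT  -> XC_WAIT   target runs the function and waits for more
 *	    XC_WAIT  -> XC_EXIT   xc_dismissed() releases the target
 *	    XC_EXIT  -> XC_IDLE   target leaves xc_loop()
 */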
63 
64 uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
65 uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
66 uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */
67 uint64_t xc_sync_tick_limit;	/* timeout limit for xt_sync() calls */
68 
69 /* timeout value for xcalls to be received by the target CPU */
70 uint64_t xc_mondo_time_limit;
71 
72 /* timeout value for xcall functions to be executed on the target CPU */
73 uint64_t xc_func_time_limit;
74 
75 uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
76 uint64_t xc_mondo_multiplier = 10;
77 
78 uint_t sendmondo_in_recover;
79 
80 /*
81  * sending x-calls
82  */
83 void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
84 void	send_one_mondo(int cpuid);
85 void	send_mondo_set(cpuset_t set);
86 
87 /*
88  * Adjust xc_attention timeout if a faster cpu is dynamically added.
89  * Ignore the dynamic removal of a cpu that would lower these timeout
90  * values.
91  */
92 static int
93 xc_func_timeout_adj(cpu_setup_t what, int cpuid)
94 {
95 	uint64_t freq = cpunodes[cpuid].clock_freq;
96 
97 	switch (what) {
98 	case CPU_ON:
99 	case CPU_INIT:
100 	case CPU_CONFIG:
101 	case CPU_CPUPART_IN:
102 		if (freq * xc_scale > xc_mondo_time_limit) {
103 			xc_mondo_time_limit = freq * xc_scale;
104 			xc_func_time_limit = xc_mondo_time_limit *
105 			    xc_mondo_multiplier;
106 		}
107 		break;
108 	case CPU_OFF:
109 	case CPU_UNCONFIG:
110 	case CPU_CPUPART_OUT:
111 	default:
112 		break;
113 	}
114 
115 	return (0);
116 }
117 
118 /*
119  * xc_init - initialize x-call related locks
120  */
121 void
122 xc_init(void)
123 {
124 	int pix;
125 	uint64_t maxfreq = 0;
126 
127 	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
128 	    (void *)ipltospl(XCALL_PIL));
129 
130 #ifdef TRAPTRACE
131 	/* Initialize for all possible CPUs. */
132 	for (pix = 0; pix < NCPU; pix++) {
133 		XC_STAT_INIT(pix);
134 	}
135 #endif /* TRAPTRACE */
136 
137 	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0,
138 	    SOFTINT_MT);
139 	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0,
140 	    SOFTINT_MT);
141 
142 	/*
143 	 * Initialize the calibrated tick limit for send_mondo.
144 	 * The value represents the maximum tick count to wait.
145 	 */
146 	xc_tick_limit =
147 	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
148 	xc_tick_jump_limit = xc_tick_limit / 32;
149 	xc_tick_limit *= xc_tick_limit_scale;
150 	xc_sync_tick_limit = xc_tick_limit;
151 
152 	/*
153 	 * Maximum number of loops to wait before timing out in xc_attention.
154 	 */
155 	for (pix = 0; pix < NCPU; pix++) {
156 		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
157 	}
158 	xc_mondo_time_limit = maxfreq * xc_scale;
159 	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);
160 
161 	/*
162 	 * Maximum number of loops to wait for an xcall function to be
163 	 * executed on the target CPU.
164 	 */
165 	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
166 }
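
/*
 * A worked example of the timeout arithmetic above (assuming clock_freq is
 * reported in Hz and the fastest cpu runs at 1.2 GHz with the default
 * xc_scale of 1; the numbers are purely illustrative):
 *	xc_mondo_time_limit = 1.2e9 * 1  = 1.2e9 polling iterations
 *	xc_func_time_limit  = 1.2e9 * 10 = 1.2e10 polling iterations
 * i.e. the function timeout is xc_mondo_multiplier times the mondo timeout,
 * and both scale with the fastest cpu's clock frequency.
 */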
167 
168 /*
169  * The following routines basically provide callers with two kinds of
170  * inter-processor interrupt services:
171  *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
172  *	2. cross traps (c-traps) - requests are handled at target cpu's TL>0
173  *
174  * Although these routines prevent the service from migrating to another cpu
175  * "after" they are called, it is the caller's responsibility to prevent
176  * cpu migration "before" calling them.
177  *
178  * X-call routines:
179  *
180  *	xc_one()  - send a request to one processor
181  *	xc_some() - send a request to some processors
182  *	xc_all()  - send a request to all processors
183  *
184  *	Their common parameters:
185  *		func - a TL=0 handler address
186  *		arg1 and arg2  - optional
187  *
188  *	The services provided by x-call routines allow callers
189  *	to send a request to target cpus to execute a TL=0
190  *	handler.
191  *	The register interface of the TL=0 handler:
192  *		%o0: arg1
193  *		%o1: arg2
194  *
195  * X-trap routines:
196  *
197  *	xt_one()  - send a request to one processor
198  *	xt_some() - send a request to some processors
199  *	xt_all()  - send a request to all processors
200  *
201  *	Their common parameters:
202  *		func - a TL>0 handler address or an interrupt number
203  *		arg1, arg2
204  *		       optional when "func" is an address;
205  *		       0        when "func" is an interrupt number
206  *
207  *	If "func" is a kernel address, then the target cpu will
208  *	execute the request of "func" with the args at the
209  *	TL>0 level.
210  *	The register interface of the TL>0 handler:
211  *		%g1: arg1
212  *		%g2: arg2
213  *
214  *	If "func" is not a kernel address, then it must be an
215  *	interrupt number assigned through add_softintr().
216  *	An interrupt number is an index into the interrupt vector table,
217  *	whose entry contains an interrupt handler address with its
218  *	corresponding interrupt level and argument.
219  *	The target cpu will arrange for the request to be serviced
220  *	according to this pre-registered information.
221  *	The args are assumed to be zero in this case.
222  *
223  * In addition, callers are allowed to capture and release cpus by
224  * calling the routines: xc_attention() and xc_dismissed().
225  */
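
/*
 * A minimal usage sketch of the interfaces described above.  The handler
 * and caller names and the XC_EXAMPLE guard are hypothetical; the block is
 * guarded so it is never built.
 */
#ifdef	XC_EXAMPLE
static uint_t
xc_example_handler(uint64_t arg1, uint64_t arg2)
{
	/* runs on the target cpu at TL=0, XCALL_PIL; %o0 = arg1, %o1 = arg2 */
	return ((uint_t)(arg1 + arg2));
}

static void
xc_example_caller(void)
{
	cpuset_t set;

	kpreempt_disable();	/* caller prevents migration before sending */

	/* send to one cpu, then to every CPU_READY cpu */
	/* (cast in case xcfunc_t's parameter types differ from uint64_t) */
	xc_one(1, (xcfunc_t *)xc_example_handler, 0x10, 0x20);
	xc_all((xcfunc_t *)xc_example_handler, 0x10, 0x20);

	/* capture cpu 2 for a critical session, send to it, release it */
	CPUSET_ZERO(set);
	CPUSET_ADD(set, 2);
	xc_attention(set);
	xc_one(2, (xcfunc_t *)xc_example_handler, 0, 0);
	xc_dismissed(set);

	kpreempt_enable();
}
#endif	/* XC_EXAMPLE */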
226 
227 /*
228  * spl_xcall - set PIL to xcall level
229  */
230 int
231 spl_xcall(void)
232 {
233 	return (splr(XCALL_PIL));
234 }
235 
236 /*
237  * xt_one - send an "x-trap" to a cpu
238  */
239 void
240 xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
241 {
242 	if (!CPU_IN_SET(cpu_ready_set, cix)) {
243 		return;
244 	}
245 	xt_one_unchecked(cix, func, arg1, arg2);
246 }
247 
248 /*
249  * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
250  * existence in cpu_ready_set
251  */
252 void
253 xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
254 {
255 	int lcx;
256 	int opl;
257 	cpuset_t tset;
258 
259 	/*
260 	 * Make sure the function address will not be interpreted as a
261 	 * dmv interrupt
262 	 */
263 	ASSERT(!DMV_IS_DMV(func));
264 
265 	/*
266 	 * It's illegal to send software inums through the cross-trap
267 	 * interface.
268 	 */
269 	ASSERT((uintptr_t)func >= KERNELBASE);
270 
271 	CPUSET_ZERO(tset);
272 
273 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
274 
275 	CPUSET_ADD(tset, cix);
276 
277 	if (cix == lcx) {
278 		/*
279 		 * same cpu - use software fast trap
280 		 */
281 		send_self_xcall(CPU, arg1, arg2, func);
282 		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
283 		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
284 	} else {
285 		/*
286 		 * other cpu - send a mondo to the target cpu
287 		 */
288 		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
289 		init_mondo(func, arg1, arg2);
290 		send_one_mondo(cix);
291 		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
292 	}
293 	XC_SPL_EXIT(lcx, opl);
294 }
295 
296 /*
297  * xt_some - send a "x-trap" to some cpus
298  */
299 void
300 xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
301 {
302 	int lcx;
303 	int opl;
304 	cpuset_t xc_cpuset, tset;
305 
306 	/*
307 	 * Make sure the function address will not be interpreted as a
308 	 * dmv interrupt
309 	 */
310 	ASSERT(!DMV_IS_DMV(func));
311 
312 	/*
313 	 * It's illegal to send software inums through the cross-trap
314 	 * interface.
315 	 */
316 	ASSERT((uintptr_t)func >= KERNELBASE);
317 
318 	CPUSET_ZERO(tset);
319 
320 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
321 
322 	CPUSET_ADD(tset, lcx);
323 
324 	/*
325 	 * only send to the CPU_READY ones
326 	 */
327 	xc_cpuset = cpu_ready_set;
328 	CPUSET_AND(xc_cpuset, cpuset);
329 
330 	/*
331 	 * send to nobody; just return
332 	 */
333 	if (CPUSET_ISNULL(xc_cpuset)) {
334 		XC_SPL_EXIT(lcx, opl);
335 		return;
336 	}
337 
338 	/*
339 	 * don't send mondo to self
340 	 */
341 	if (CPU_IN_SET(xc_cpuset, lcx)) {
342 		/*
343 		 * same cpu - use software fast trap
344 		 */
345 		send_self_xcall(CPU, arg1, arg2, func);
346 		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
347 		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
348 		CPUSET_DEL(xc_cpuset, lcx);
349 		if (CPUSET_ISNULL(xc_cpuset)) {
350 			XC_SPL_EXIT(lcx, opl);
351 			return;
352 		}
353 	}
354 	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
355 	init_mondo(func, arg1, arg2);
356 	send_mondo_set(xc_cpuset);
357 	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);
358 
359 	XC_SPL_EXIT(lcx, opl);
360 }
361 
362 /*
363  * xt_all - send a "x-trap" to all cpus
364  */
365 void
366 xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
367 {
368 	int lcx;
369 	int opl;
370 	cpuset_t xc_cpuset, tset;
371 
372 	/*
373 	 * Make sure the function address will not be interpreted as a
374 	 * dmv interrupt
375 	 */
376 	ASSERT(!DMV_IS_DMV(func));
377 
378 	/*
379 	 * It's illegal to send software inums through the cross-trap
380 	 * interface.
381 	 */
382 	ASSERT((uintptr_t)func >= KERNELBASE);
383 
384 	CPUSET_ZERO(tset);
385 
386 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
387 
388 	CPUSET_ADD(tset, lcx);
389 
390 	/*
391 	 * same cpu - use software fast trap
392 	 */
393 	if (CPU_IN_SET(cpu_ready_set, lcx))
394 		send_self_xcall(CPU, arg1, arg2, func);
395 
396 	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);
397 
398 	/*
399 	 * don't send mondo to self
400 	 */
401 	xc_cpuset = cpu_ready_set;
402 	CPUSET_DEL(xc_cpuset, lcx);
403 
404 	if (CPUSET_ISNULL(xc_cpuset)) {
405 		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
406 		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
407 		XC_SPL_EXIT(lcx, opl);
408 		return;
409 	}
410 
411 	init_mondo(func, arg1, arg2);
412 	send_mondo_set(xc_cpuset);
413 
414 	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
415 	XC_SPL_EXIT(lcx, opl);
416 }
417 
418 /*
419  * xc_one - send a "x-call" to a cpu
420  */
421 void
422 xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
423 {
424 	int lcx;
425 	int opl;
426 	uint64_t loop_cnt = 0;
427 	cpuset_t tset;
428 	int first_time = 1;
429 
430 	/*
431 	 * send to nobody; just return
432 	 */
433 	if (!CPU_IN_SET(cpu_ready_set, cix))
434 		return;
435 
436 	ASSERT((uintptr_t)func > KERNELBASE);
437 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
438 
439 	CPUSET_ZERO(tset);
440 
441 	kpreempt_disable();
442 
443 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
444 
445 	CPUSET_ADD(tset, cix);
446 
447 	if (cix == lcx) {	/* same cpu just do it */
448 		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
449 		(*func)(arg1, arg2);
450 		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
451 		XC_SPL_EXIT(lcx, opl);
452 		kpreempt_enable();
453 		return;
454 	}
455 
456 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
457 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
458 		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
459 		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
460 		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
461 		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);
462 
463 		/*
464 		 * target processor's xc_loop should be waiting
465 		 * for the work to do; just set up the xc_mbox
466 		 */
467 		XC_SETUP(cix, func, arg1, arg2);
468 		membar_stld();
469 
470 		while (xc_mbox[cix].xc_state != XC_WAIT) {
471 			if (loop_cnt++ > xc_func_time_limit) {
472 				if (sendmondo_in_recover) {
473 					drv_usecwait(1);
474 					loop_cnt = 0;
475 					continue;
476 				}
477 				cmn_err(CE_PANIC, "xc_one() timeout, "
478 				    "xc_state[%d] != XC_WAIT", cix);
479 			}
480 		}
481 		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
482 		XC_SPL_EXIT(lcx, opl);
483 		kpreempt_enable();
484 		return;
485 	}
486 
487 	/*
488 	 * Avoid deadlock if someone has sent us an xc_loop request while
489 	 * we are trying to grab xc_sys_mutex.
490 	 */
491 	XC_SPL_EXIT(lcx, opl);
492 
493 	/*
494 	 * At this point, since we don't own xc_sys_mutex,
495 	 * our pil shouldn't run at or above the XCALL_PIL.
496 	 */
497 	ASSERT(getpil() < XCALL_PIL);
498 
499 	/*
500 	 * Since xc_holder is not owned by us, it could be that
501 	 * no one owns it, or we are not informed to enter into
502 	 * xc_loop(). In either case, we need to grab the
503 	 * xc_sys_mutex before we write to the xc_mbox, and
504 	 * we shouldn't release it until the request is finished.
505 	 */
506 
507 	mutex_enter(&xc_sys_mutex);
508 	xc_spl_enter[lcx] = 1;
509 
510 	/*
511 	 * Since we own xc_sys_mutex now, we are safe to
512 	 * write to the xc_mbox.
513 	 */
514 	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
515 	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
516 	XC_SETUP(cix, func, arg1, arg2);
517 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
518 	send_one_mondo(cix);
519 	xc_spl_enter[lcx] = 0;
520 
521 	/* xc_serv does membar_stld */
522 	while (xc_mbox[cix].xc_state != XC_IDLE) {
523 		if (loop_cnt++ > xc_func_time_limit) {
524 			if (sendmondo_in_recover) {
525 				drv_usecwait(1);
526 				loop_cnt = 0;
527 				continue;
528 			}
529 			if (first_time) {
530 				XT_SYNC_ONE(cix);
531 				first_time = 0;
532 				loop_cnt = 0;
533 				continue;
534 			}
535 			cmn_err(CE_PANIC, "xc_one() timeout, "
536 			    "xc_state[%d] != XC_IDLE", cix);
537 		}
538 	}
539 	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
540 	mutex_exit(&xc_sys_mutex);
541 
542 	kpreempt_enable();
543 }
544 
545 /*
546  * xc_some - send a "x-call" to some cpus; sending to self is excluded
547  */
548 void
549 xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
550 {
551 	int lcx;
552 	int opl;
553 	cpuset_t xc_cpuset, tset;
554 
555 	ASSERT((uintptr_t)func > KERNELBASE);
556 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
557 
558 	CPUSET_ZERO(tset);
559 
560 	kpreempt_disable();
561 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
562 
563 	CPUSET_ADD(tset, lcx);
564 
565 	/*
566 	 * only send to the CPU_READY ones
567 	 */
568 	xc_cpuset = cpu_ready_set;
569 	CPUSET_AND(xc_cpuset, cpuset);
570 
571 	/*
572 	 * send to nobody; just return
573 	 */
574 	if (CPUSET_ISNULL(xc_cpuset)) {
575 		XC_SPL_EXIT(lcx, opl);
576 		kpreempt_enable();
577 		return;
578 	}
579 
580 	if (CPU_IN_SET(xc_cpuset, lcx)) {
581 		/*
582 		 * same cpu just do it
583 		 */
584 		(*func)(arg1, arg2);
585 		CPUSET_DEL(xc_cpuset, lcx);
586 		if (CPUSET_ISNULL(xc_cpuset)) {
587 			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
588 			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
589 			XC_SPL_EXIT(lcx, opl);
590 			kpreempt_enable();
591 			return;
592 		}
593 	}
594 
595 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
596 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
597 
598 		CPUSET_AND(mset, cpuset);
599 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
600 		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
601 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
602 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
603 		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
604 		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
605 		XC_SPL_EXIT(lcx, opl);
606 		kpreempt_enable();
607 		return;
608 	}
609 
610 	/*
611 	 * Avoid deadlock if someone has sent us an xc_loop request while
612 	 * we are trying to grab xc_sys_mutex.
613 	 */
614 	XC_SPL_EXIT(lcx, opl);
615 
616 	/*
617 	 * At this point, since we don't own xc_sys_mutex,
618 	 * our pil shouldn't run at or above the XCALL_PIL.
619 	 */
620 	ASSERT(getpil() < XCALL_PIL);
621 
622 	/*
623 	 * grab xc_sys_mutex before writing to the xc_mbox
624 	 */
625 	mutex_enter(&xc_sys_mutex);
626 	xc_spl_enter[lcx] = 1;
627 
628 	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
629 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
630 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
631 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
632 
633 	xc_spl_enter[lcx] = 0;
634 	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
635 	mutex_exit(&xc_sys_mutex);
636 	kpreempt_enable();
637 }
638 
639 /*
640  * xc_all - send a "x-call" to all cpus
641  */
642 void
643 xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
644 {
645 	int lcx;
646 	int opl;
647 	cpuset_t xc_cpuset, tset;
648 
649 	ASSERT((uintptr_t)func > KERNELBASE);
650 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
651 
652 	CPUSET_ZERO(tset);
653 
654 	kpreempt_disable();
655 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
656 
657 	CPUSET_ADD(tset, lcx);
658 
659 	/*
660 	 * same cpu just do it
661 	 */
662 	(*func)(arg1, arg2);
663 	xc_cpuset = cpu_ready_set;
664 	CPUSET_DEL(xc_cpuset, lcx);
665 
666 	if (CPUSET_ISNULL(xc_cpuset)) {
667 		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
668 		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
669 		XC_SPL_EXIT(lcx, opl);
670 		kpreempt_enable();
671 		return;
672 	}
673 
674 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
675 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
676 
677 		CPUSET_AND(mset, xc_cpuset);
678 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
679 		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
680 		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
681 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
682 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
683 		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
684 		XC_SPL_EXIT(lcx, opl);
685 		kpreempt_enable();
686 		return;
687 	}
688 
689 	/*
690 	 * Avoid deadlock if someone has sent us an xc_loop request while
691 	 * we are trying to grab xc_sys_mutex.
692 	 */
693 	XC_SPL_EXIT(lcx, opl);
694 
695 	/*
696 	 * At this point, since we don't own xc_sys_mutex,
697 	 * our pil shouldn't run at or above the XCALL_PIL.
698 	 */
699 	ASSERT(getpil() < XCALL_PIL);
700 
701 	/*
702 	 * grab xc_sys_mutex before writing to the xc_mbox
703 	 */
704 	mutex_enter(&xc_sys_mutex);
705 	xc_spl_enter[lcx] = 1;
706 
707 	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
708 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
709 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
710 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
711 
712 	xc_spl_enter[lcx] = 0;
713 	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
714 	mutex_exit(&xc_sys_mutex);
715 	kpreempt_enable();
716 }
717 
718 /*
719  * xc_attention - paired with xc_dismissed()
720  *
721  * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it;
722  * called when an initiator wants to capture some/all cpus for a critical
723  * session.
724  */
725 void
726 xc_attention(cpuset_t cpuset)
727 {
728 	int pix, lcx;
729 	cpuset_t xc_cpuset, tmpset;
730 	cpuset_t recv_cpuset;
731 	uint64_t loop_cnt = 0;
732 	int first_time = 1;
733 
734 	CPUSET_ZERO(recv_cpuset);
735 
736 	/*
737 	 * don't migrate the cpu until xc_dismissed() is finished
738 	 */
739 	ASSERT(getpil() < XCALL_PIL);
740 	mutex_enter(&xc_sys_mutex);
741 	lcx = (int)(CPU->cpu_id);
742 	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
743 	    x_dstat[lcx][XC_DISMISSED]);
744 	ASSERT(xc_holder == -1);
745 	xc_mbox[lcx].xc_cpuset = cpuset;
746 	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */
747 
748 	/*
749 	 * only send to the CPU_READY ones
750 	 */
751 	xc_cpuset = cpu_ready_set;
752 	CPUSET_AND(xc_cpuset, cpuset);
753 
754 	/*
755 	 * don't send mondo to self
756 	 */
757 	CPUSET_DEL(xc_cpuset, lcx);
758 
759 	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
760 	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, 0, 0);
761 
762 	if (CPUSET_ISNULL(xc_cpuset))
763 		return;
764 
765 	xc_spl_enter[lcx] = 1;
766 	/*
767 	 * inform the target processors to enter into xc_loop()
768 	 */
769 	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
770 	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
771 	xc_spl_enter[lcx] = 0;
772 
773 	/*
774 	 * make sure target processors have entered into xc_loop()
775 	 */
776 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
777 		tmpset = xc_cpuset;
778 		for (pix = 0; pix < NCPU; pix++) {
779 			if (CPU_IN_SET(tmpset, pix)) {
780 				/*
781 				 * membar_stld() is done in xc_loop
782 				 */
783 				if (xc_mbox[pix].xc_state == XC_WAIT) {
784 					CPUSET_ADD(recv_cpuset, pix);
785 				}
786 				CPUSET_DEL(tmpset, pix);
787 				if (CPUSET_ISNULL(tmpset)) {
788 					break;
789 				}
790 			}
791 		}
792 		if (loop_cnt++ > xc_mondo_time_limit) {
793 			if (sendmondo_in_recover) {
794 				drv_usecwait(1);
795 				loop_cnt = 0;
796 				continue;
797 			}
798 			if (first_time) {
799 				XT_SYNC_SOME(xc_cpuset);
800 				first_time = 0;
801 				loop_cnt = 0;
802 				continue;
803 			}
804 			cmn_err(CE_PANIC, "xc_attention() timeout");
805 		}
806 	}
807 
808 	/*
809 	 * xc_sys_mutex remains held until xc_dismissed() is finished
810 	 */
811 }
812 
813 /*
814  * xc_dismissed - paired with xc_attention()
815  *
816  * Called after the critical session is finished.
817  */
818 void
819 xc_dismissed(cpuset_t cpuset)
820 {
821 	int pix;
822 	int lcx = (int)(CPU->cpu_id);
823 	cpuset_t xc_cpuset, tmpset;
824 	cpuset_t recv_cpuset;
825 	uint64_t loop_cnt = 0;
826 
827 	ASSERT(lcx == xc_holder);
828 	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
829 	ASSERT(getpil() >= XCALL_PIL);
830 	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
831 	CPUSET_ZERO(recv_cpuset);
832 	membar_stld();
833 
834 	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
835 	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);
836 
837 	/*
838 	 * only send to the CPU_READY ones
839 	 */
840 	xc_cpuset = cpu_ready_set;
841 	CPUSET_AND(xc_cpuset, cpuset);
842 
843 	/*
844 	 * exclude itself
845 	 */
846 	CPUSET_DEL(xc_cpuset, lcx);
847 	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, 0, 0);
848 	if (CPUSET_ISNULL(xc_cpuset)) {
849 		xc_holder = -1;
850 		mutex_exit(&xc_sys_mutex);
851 		return;
852 	}
853 
854 	/*
855 	 * inform other processors to get out of xc_loop()
856 	 */
857 	tmpset = xc_cpuset;
858 	for (pix = 0; pix < NCPU; pix++) {
859 		if (CPU_IN_SET(tmpset, pix)) {
860 			xc_mbox[pix].xc_state = XC_EXIT;
861 			membar_stld();
862 			CPUSET_DEL(tmpset, pix);
863 			if (CPUSET_ISNULL(tmpset)) {
864 				break;
865 			}
866 		}
867 	}
868 
869 	/*
870 	 * make sure target processors have exited from xc_loop()
871 	 */
872 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
873 		tmpset = xc_cpuset;
874 		for (pix = 0; pix < NCPU; pix++) {
875 			if (CPU_IN_SET(tmpset, pix)) {
876 				/*
877 				 * membar_stld() is done in xc_loop
878 				 */
879 				if (xc_mbox[pix].xc_state == XC_IDLE) {
880 					CPUSET_ADD(recv_cpuset, pix);
881 				}
882 				CPUSET_DEL(tmpset, pix);
883 				if (CPUSET_ISNULL(tmpset)) {
884 					break;
885 				}
886 			}
887 		}
888 		if (loop_cnt++ > xc_func_time_limit) {
889 			if (sendmondo_in_recover) {
890 				drv_usecwait(1);
891 				loop_cnt = 0;
892 				continue;
893 			}
894 			cmn_err(CE_PANIC, "xc_dismissed() timeout");
895 		}
896 	}
897 	xc_holder = -1;
898 	mutex_exit(&xc_sys_mutex);
899 }
900 
901 /*
902  * xc_serv - "x-call" handler at TL=0; serves only one x-call request;
903  * runs at XCALL_PIL level.
904  */
905 uint_t
906 xc_serv(void)
907 {
908 	int lcx = (int)(CPU->cpu_id);
909 	struct xc_mbox *xmp;
910 	xcfunc_t *func;
911 	uint64_t arg1 = 0, arg2 = 0;	/* zero-init; traced below even if func is NULL */
912 	cpuset_t tset;
913 
914 	ASSERT(getpil() == XCALL_PIL);
915 	CPUSET_ZERO(tset);
916 	CPUSET_ADD(tset, lcx);
917 	flush_windows();
918 	xmp = &xc_mbox[lcx];
919 	ASSERT(lcx != xc_holder);
920 	ASSERT(xmp->xc_state == XC_DOIT);
921 	func = xmp->xc_func;
922 	XC_TRACE(XC_SERV, &tset, func, xmp->xc_arg1, xmp->xc_arg2);
923 	if (func != NULL) {
924 		arg1 = xmp->xc_arg1;
925 		arg2 = xmp->xc_arg2;
926 		(*func)(arg1, arg2);
927 	}
928 	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
929 	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
930 	xmp->xc_state = XC_IDLE;
931 	membar_stld();
932 	return (1);
933 }
934 
935 /*
936  * If xc_loop_panic == 1, an xc_loop() timeout will cause a panic;
937  * otherwise it prints a warning.
938  */
939 uint_t xc_loop_panic = 0;
940 
941 /*
942  * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
943  * session, or serves multiple x-call requests; runs at XCALL_PIL level.
944  */
945 uint_t
946 xc_loop(void)
947 {
948 	int lcx = (int)(CPU->cpu_id);
949 	struct xc_mbox *xmp;
950 	xcfunc_t *func;
951 	uint64_t arg1, arg2;
952 	uint64_t loop_cnt = 0;
953 	cpuset_t tset;
954 
955 	ASSERT(getpil() == XCALL_PIL);
956 
957 	CPUSET_ZERO(tset);
958 	flush_windows();
959 
960 	/*
961 	 * Someone must own the xc_sys_mutex;
962 	 * no further interrupt (at XCALL_PIL or below) can
963 	 * be taken by this processor until xc_loop exits.
964 	 *
965 	 * The owner of xc_sys_mutex (or xc_holder) can expect
966 	 * its xc/xt requests are handled as follows:
967 	 *	xc requests use xc_mbox's handshaking for their services
968 	 *	xt requests at TL>0 will be handled immediately
969 	 *	xt requests at TL=0:
970 	 *		if their handlers' pils are <= XCALL_PIL, then
971 	 *			they will be handled after xc_loop exits
972 	 *			(so, they probably should not be used)
973 	 *		else they will be handled immediately
974 	 *
975 	 * For those who are not informed to enter xc_loop, if they
976 	 * send xc/xt requests to this processor at this moment,
977 	 * the requests will be handled as follows:
978 	 *	xc requests will be handled after they grab xc_sys_mutex
979 	 *	xt requests at TL>0 will be handled immediately
980 	 *	xt requests at TL=0:
981 	 *		if their handlers' pils are <= XCALL_PIL, then
982 	 *			they will be handled after xc_loop exits
983 	 *		else they will be handled immediately
984 	 */
985 	xmp = &xc_mbox[lcx];
986 	ASSERT(lcx != xc_holder);
987 	ASSERT(xmp->xc_state == XC_ENTER);
988 	xmp->xc_state = XC_WAIT;
989 	CPUSET_ADD(tset, lcx);
990 	membar_stld();
991 	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
992 	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, 0, 0);
993 	while (xmp->xc_state != XC_EXIT) {
994 		if (xmp->xc_state == XC_DOIT) {
995 			func = xmp->xc_func;
996 			arg1 = xmp->xc_arg1;
997 			arg2 = xmp->xc_arg2;
998 			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
999 			if (func != NULL)
1000 				(*func)(arg1, arg2);
1001 			xmp->xc_state = XC_WAIT;
1002 			membar_stld();
1003 			/*
1004 			 * reset the timeout counter
1005 			 * since some work was done
1006 			 */
1007 			loop_cnt = 0;
1008 		} else {
1009 			/* patience is a virtue... */
1010 			loop_cnt++;
1011 		}
1012 
1013 		if (loop_cnt > xc_func_time_limit) {
1014 			if (sendmondo_in_recover) {
1015 				drv_usecwait(1);
1016 				loop_cnt = 0;
1017 				continue;
1018 			}
1019 			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
1020 			    "xc_loop() timeout");
1021 			/*
1022 			 * if the above displayed a warning,
1023 			 * reset the timeout counter and be patient
1024 			 */
1025 			loop_cnt = 0;
1026 		}
1027 	}
1028 	ASSERT(xmp->xc_state == XC_EXIT);
1029 	ASSERT(xc_holder != -1);
1030 	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, 0, 0);
1031 	xmp->xc_state = XC_IDLE;
1032 	membar_stld();
1033 	return (1);
1034 }
1035