/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpuvar.h>
#include <sys/intreg.h>
#include <sys/x_call.h>
#include <sys/cmn_err.h>
#include <sys/membar.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/privregs.h>
#include <sys/xc_impl.h>
#include <sys/ivintr.h>
#include <sys/dmv.h>
#include <sys/sysmacros.h>

#ifdef TRAPTRACE
uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
uint_t x_rstat[NCPU][4];
#endif /* TRAPTRACE */

static uint64_t	xc_serv_inum;	/* software interrupt number for xc_serv() */
static uint64_t	xc_loop_inum;	/* software interrupt number for xc_loop() */
kmutex_t	xc_sys_mutex;	/* protect xcall session and xc_mbox */
int		xc_spl_enter[NCPU];	/* protect sending x-call */
static int	xc_holder = -1; /* the cpu that initiates xc_attention, 0 is valid */

/*
 * Mail box for handshaking and xcall request; protected by xc_sys_mutex
 */
static struct xc_mbox {
	xcfunc_t *xc_func;
	uint64_t xc_arg1;
	uint64_t xc_arg2;
	cpuset_t xc_cpuset;
	volatile uint_t xc_state;
} xc_mbox[NCPU];

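/*
 * A brief sketch of the per-cpu mailbox handshake, as inferred from the
 * way xc_state is used below (the state values themselves are defined in
 * sys/xc_impl.h):
 *
 *	XC_IDLE		mailbox free; cpu not captured
 *	XC_ENTER	initiator asks the target to enter xc_loop()
 *	XC_WAIT		target is parked in xc_loop(), waiting for work
 *	XC_DOIT		initiator has posted xc_func/xc_arg1/xc_arg2
 *	XC_EXIT		initiator asks the target to leave xc_loop()
 *
 * Whichever side stores a new state publishes it with membar_stld(),
 * and the other side spins until it observes the change.
 */
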
uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */
uint64_t xc_sync_tick_limit;	/* timeout limit for xt_sync() calls */

/* timeout value for xcalls to be received by the target CPU */
uint64_t xc_mondo_time_limit;

/* timeout value for xcall functions to be executed on the target CPU */
uint64_t xc_func_time_limit;

uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
uint64_t xc_mondo_multiplier = 10;

uint_t sendmondo_in_recover;

/*
 * sending x-calls
 */
void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
void	send_one_mondo(int cpuid);
void	send_mondo_set(cpuset_t set);

/*
 * Adjust xc_attention timeout if a faster cpu is dynamically added.
 * Ignore the dynamic removal of a cpu that would lower these timeout
 * values.
 */
static int
xc_func_timeout_adj(cpu_setup_t what, int cpuid) {
	uint64_t freq = cpunodes[cpuid].clock_freq;

	switch (what) {
	case CPU_ON:
	case CPU_INIT:
	case CPU_CONFIG:
	case CPU_CPUPART_IN:
		if (freq * xc_scale > xc_mondo_time_limit) {
			xc_mondo_time_limit = freq * xc_scale;
			xc_func_time_limit = xc_mondo_time_limit *
			    xc_mondo_multiplier;
		}
		break;
	case CPU_OFF:
	case CPU_UNCONFIG:
	case CPU_CPUPART_OUT:
	default:
		break;
	}

	return (0);
}

/*
 * xc_init - initialize x-call related locks
 */
void
xc_init(void)
{
	int pix;
	uint64_t maxfreq = 0;

	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
	    (void *)ipltospl(XCALL_PIL));

#ifdef TRAPTRACE
	/* Initialize for all possible CPUs. */
	for (pix = 0; pix < NCPU; pix++) {
		XC_STAT_INIT(pix);
	}
#endif /* TRAPTRACE */

	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0,
	    SOFTINT_MT);
	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0,
	    SOFTINT_MT);

	/*
	 * Initialize the calibrated tick limit for send_mondo.
	 * The value represents the maximum tick count to wait.
	 */
	xc_tick_limit =
	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
	xc_tick_jump_limit = xc_tick_limit / 32;
	xc_tick_limit *= xc_tick_limit_scale;
	xc_sync_tick_limit = xc_tick_limit;

	/*
	 * Maximum number of loops to wait before timing out in xc_attention.
	 */
	for (pix = 0; pix < NCPU; pix++) {
		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
	}
	xc_mondo_time_limit = maxfreq * xc_scale;
	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);

	/*
	 * Maximum number of loops to wait for an xcall function to be
	 * executed on the target CPU.
	 */
	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
}

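/*
 * A rough worked example of the arithmetic above, with hypothetical
 * numbers (the real sys_tick_freq, XC_SEND_MONDO_MSEC and CPU clock
 * rates are platform dependent):
 *
 *	sys_tick_freq = 1 GHz and XC_SEND_MONDO_MSEC = 1 would give
 *	    xc_tick_limit      = 1e9 * 1 / 1000 = 1,000,000 ticks
 *	    xc_tick_jump_limit = 1,000,000 / 32 = 31,250 ticks
 *
 *	With the defaults xc_scale = 1 and xc_mondo_multiplier = 10,
 *	xc_mondo_time_limit is simply the fastest cpu's clock_freq and
 *	xc_func_time_limit is ten times that; both are loop counts, so
 *	the effective timeouts scale with cpu speed.
 */
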
/*
 * The following routines basically provide callers with two kinds of
 * inter-processor interrupt services:
 *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
 *	2. cross traps (c-traps) - requests are handled at target cpu's TL>0
 *
 * Although these routines protect the services from migrating to other cpus
 * "after" they are called, it is the caller's choice or responsibility to
 * prevent the cpu migration "before" calling them.
 *
 * X-call routines:
 *
 *	xc_one()  - send a request to one processor
 *	xc_some() - send a request to some processors
 *	xc_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL=0 handler address
 *		arg1 and arg2 - optional
 *
 *	The services provided by x-call routines allow callers
 *	to send a request to target cpus to execute a TL=0
 *	handler.
 *	The interface of the registers of the TL=0 handler:
 *		%o0: arg1
 *		%o1: arg2
 *
 * X-trap routines:
 *
 *	xt_one()  - send a request to one processor
 *	xt_some() - send a request to some processors
 *	xt_all()  - send a request to all processors
 *
 *	Their common parameters:
 *		func - a TL>0 handler address or an interrupt number
 *		arg1, arg2
 *			optional when "func" is an address;
 *			0 when "func" is an interrupt number
 *
 *	If the request of "func" is a kernel address, then
 *	the target cpu will execute the request of "func" with
 *	args at "TL>0" level.
 *	The interface of the registers of the TL>0 handler:
 *		%g1: arg1
 *		%g2: arg2
 *
 *	If the request of "func" is not a kernel address, then it has
 *	to be an assigned interrupt number through add_softintr().
 *	An interrupt number is an index to the interrupt vector table,
 *	which entry contains an interrupt handler address with its
 *	corresponding interrupt level and argument.
 *	The target cpu will arrange the request to be serviced according
 *	to its pre-registered information.
 *	args are assumed to be zeros in this case.
 *
 * In addition, callers are allowed to capture and release cpus by
 * calling the routines: xc_attention() and xc_dismissed().
 */
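
/*
 * A minimal usage sketch of the interface described above; the handler
 * name and its arguments are hypothetical, only the calling pattern
 * matters:
 *
 *	static uint_t
 *	example_handler(uint64_t arg1, uint64_t arg2)
 *	{
 *		// runs on each target cpu at TL=0, at XCALL_PIL
 *		return (1);
 *	}
 *
 *	cpuset_t set;
 *
 *	kpreempt_disable();		// caller prevents migration
 *	xc_one(target_cpuid, example_handler, arg1, arg2);
 *
 *	CPUSET_ZERO(set);
 *	CPUSET_ADD(set, target_cpuid);
 *	xc_some(set, example_handler, arg1, arg2);
 *	xc_all(example_handler, arg1, arg2);
 *	kpreempt_enable();
 *
 * The xt_*() routines follow the same pattern but take either a TL>0
 * trap handler address or a software interrupt number obtained from
 * add_softintr(), as spelled out above.
 */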

/*
 * spl_xcall - set PIL to xcall level
 */
int
spl_xcall(void)
{
	return (splr(XCALL_PIL));
}

/*
 * xt_one - send an "x-trap" to a cpu
 */
void
xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	if (!CPU_IN_SET(cpu_ready_set, cix)) {
		return;
	}
	xt_one_unchecked(cix, func, arg1, arg2);
}

/*
 * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
 * existence in cpu_ready_set
 */
void
xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
	} else {
		/*
		 * other cpu - send a mondo to the target cpu
		 */
		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
		init_mondo(func, arg1, arg2);
		send_one_mondo(cix);
		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
	}
	XC_SPL_EXIT(lcx, opl);
}

/*
 * xt_some - send an "x-trap" to some cpus
 */
void
xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	/*
	 * don't send mondo to self
	 */
	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu - use software fast trap
		 */
		send_self_xcall(CPU, arg1, arg2, func);
		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_SPL_EXIT(lcx, opl);
			return;
		}
	}
	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);
	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);

	XC_SPL_EXIT(lcx, opl);
}

/*
 * xt_all - send an "x-trap" to all cpus
 */
void
xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	/*
	 * Make sure the function address will not be interpreted as a
	 * dmv interrupt
	 */
	ASSERT(!DMV_IS_DMV(func));

	/*
	 * It's illegal to send software inums through the cross-trap
	 * interface.
	 */
	ASSERT((uintptr_t)func >= KERNELBASE);

	CPUSET_ZERO(tset);

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu - use software fast trap
	 */
	if (CPU_IN_SET(cpu_ready_set, lcx))
		send_self_xcall(CPU, arg1, arg2, func);

	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);

	/*
	 * don't send mondo to self
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		return;
	}

	init_mondo(func, arg1, arg2);
	send_mondo_set(xc_cpuset);

	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
	XC_SPL_EXIT(lcx, opl);
}

/*
 * xc_one - send an "x-call" to a cpu
 */
void
xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	uint64_t loop_cnt = 0;
	cpuset_t tset;
	int first_time = 1;

	/*
	 * send to nobody; just return
	 */
	if (!CPU_IN_SET(cpu_ready_set, cix))
		return;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();

	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, cix);

	if (cix == lcx) {	/* same cpu just do it */
		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
		(*func)(arg1, arg2);
		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {	/* got the xc_sys_mutex already */
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);

		/*
		 * target processor's xc_loop should be waiting
		 * for the work to do; just set up the xc_mbox
		 */
		XC_SETUP(cix, func, arg1, arg2);
		membar_stld();

		while (xc_mbox[cix].xc_state != XC_WAIT) {
			if (loop_cnt++ > xc_func_time_limit) {
				if (sendmondo_in_recover) {
					drv_usecwait(1);
					loop_cnt = 0;
					continue;
				}
				cmn_err(CE_PANIC, "xc_one() timeout, "
				    "xc_state[%d] != XC_WAIT", cix);
			}
		}
		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * Since xc_holder is not owned by us, it could be that
	 * no one owns it, or we are not informed to enter into
	 * xc_loop(). In either case, we need to grab the
	 * xc_sys_mutex before we write to the xc_mbox, and
	 * we shouldn't release it until the request is finished.
	 */

	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	/*
	 * Since we own xc_sys_mutex now, we are safe to
	 * write to the xc_mbox.
	 */
	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
	XC_SETUP(cix, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	send_one_mondo(cix);
	xc_spl_enter[lcx] = 0;

	/* xc_serv does membar_stld */
	while (xc_mbox[cix].xc_state != XC_IDLE) {
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_ONE(cix);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_one() timeout, "
			    "xc_state[%d] != XC_IDLE", cix);
		}
	}
	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
	mutex_exit(&xc_sys_mutex);

	kpreempt_enable();
}

/*
 * xc_some - send an "x-call" to some cpus; sending to self is excluded
 */
void
xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * send to nobody; just return
	 */
	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (CPU_IN_SET(xc_cpuset, lcx)) {
		/*
		 * same cpu just do it
		 */
		(*func)(arg1, arg2);
		CPUSET_DEL(xc_cpuset, lcx);
		if (CPUSET_ISNULL(xc_cpuset)) {
			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
			XC_SPL_EXIT(lcx, opl);
			kpreempt_enable();
			return;
		}
	}

	if (xc_holder == lcx) {	/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}

/*
 * xc_all - send an "x-call" to all cpus
 */
void
xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	int lcx;
	int opl;
	cpuset_t xc_cpuset, tset;

	ASSERT((uintptr_t)func > KERNELBASE);
	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);

	CPUSET_ZERO(tset);

	kpreempt_disable();
	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */

	CPUSET_ADD(tset, lcx);

	/*
	 * same cpu just do it
	 */
	(*func)(arg1, arg2);
	xc_cpuset = cpu_ready_set;
	CPUSET_DEL(xc_cpuset, lcx);

	if (CPUSET_ISNULL(xc_cpuset)) {
		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	if (xc_holder == lcx) {	/* got the xc_sys_mutex already */
		cpuset_t mset = xc_mbox[lcx].xc_cpuset;

		CPUSET_AND(mset, xc_cpuset);
		ASSERT(MUTEX_HELD(&xc_sys_mutex));
		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
		XC_SPL_EXIT(lcx, opl);
		kpreempt_enable();
		return;
	}

	/*
	 * Avoid deadlock if someone has sent us an xc_loop request while
	 * we are trying to grab xc_sys_mutex.
	 */
	XC_SPL_EXIT(lcx, opl);

	/*
	 * At this point, since we don't own xc_sys_mutex,
	 * our pil shouldn't run at or above the XCALL_PIL.
	 */
	ASSERT(getpil() < XCALL_PIL);

	/*
	 * grab xc_sys_mutex before writing to the xc_mbox
	 */
	mutex_enter(&xc_sys_mutex);
	xc_spl_enter[lcx] = 1;

	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);

	xc_spl_enter[lcx] = 0;
	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
	mutex_exit(&xc_sys_mutex);
	kpreempt_enable();
}

/*
 * xc_attention - paired with xc_dismissed()
 *
 * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it;
 * called when an initiator wants to capture some/all cpus for a critical
 * session.
 */
void
xc_attention(cpuset_t cpuset)
{
	int pix, lcx;
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;
	int first_time = 1;

	CPUSET_ZERO(recv_cpuset);

	/*
	 * don't migrate the cpu until xc_dismissed() is finished
	 */
	ASSERT(getpil() < XCALL_PIL);
	mutex_enter(&xc_sys_mutex);
	lcx = (int)(CPU->cpu_id);
	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
	    x_dstat[lcx][XC_DISMISSED]);
	ASSERT(xc_holder == -1);
	xc_mbox[lcx].xc_cpuset = cpuset;
	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * don't send mondo to self
	 */
	CPUSET_DEL(xc_cpuset, lcx);

	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);

	if (CPUSET_ISNULL(xc_cpuset))
		return;

	xc_spl_enter[lcx] = 1;
	/*
	 * inform the target processors to enter into xc_loop()
	 */
	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
	xc_spl_enter[lcx] = 0;

	/*
	 * make sure target processors have entered into xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_WAIT) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_mondo_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			if (first_time) {
				XT_SYNC_SOME(xc_cpuset);
				first_time = 0;
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_attention() timeout");
		}
	}

	/*
	 * xc_sys_mutex remains held until xc_dismissed() is finished
	 */
}

/*
 * xc_dismissed - paired with xc_attention()
 *
 * Called after the critical session is finished.
 */
void
xc_dismissed(cpuset_t cpuset)
{
	int pix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t xc_cpuset, tmpset;
	cpuset_t recv_cpuset;
	uint64_t loop_cnt = 0;

	ASSERT(lcx == xc_holder);
	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
	ASSERT(getpil() >= XCALL_PIL);
	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
	CPUSET_ZERO(recv_cpuset);
	membar_stld();

	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);

	/*
	 * only send to the CPU_READY ones
	 */
	xc_cpuset = cpu_ready_set;
	CPUSET_AND(xc_cpuset, cpuset);

	/*
	 * exclude itself
	 */
	CPUSET_DEL(xc_cpuset, lcx);
	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
	if (CPUSET_ISNULL(xc_cpuset)) {
		xc_holder = -1;
		mutex_exit(&xc_sys_mutex);
		return;
	}

	/*
	 * inform other processors to get out of xc_loop()
	 */
	tmpset = xc_cpuset;
	for (pix = 0; pix < NCPU; pix++) {
		if (CPU_IN_SET(tmpset, pix)) {
			xc_mbox[pix].xc_state = XC_EXIT;
			membar_stld();
			CPUSET_DEL(tmpset, pix);
			if (CPUSET_ISNULL(tmpset)) {
				break;
			}
		}
	}

	/*
	 * make sure target processors have exited from xc_loop()
	 */
	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
		tmpset = xc_cpuset;
		for (pix = 0; pix < NCPU; pix++) {
			if (CPU_IN_SET(tmpset, pix)) {
				/*
				 * membar_stld() is done in xc_loop
				 */
				if (xc_mbox[pix].xc_state == XC_IDLE) {
					CPUSET_ADD(recv_cpuset, pix);
				}
				CPUSET_DEL(tmpset, pix);
				if (CPUSET_ISNULL(tmpset)) {
					break;
				}
			}
		}
		if (loop_cnt++ > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(CE_PANIC, "xc_dismissed() timeout");
		}
	}
	xc_holder = -1;
	mutex_exit(&xc_sys_mutex);
}

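/*
 * A minimal sketch of the capture/release pattern built on xc_attention()
 * and xc_dismissed(); what the caller does while the other cpus are held
 * is entirely up to the caller:
 *
 *	xc_attention(cpuset);	// targets spin in xc_loop() at XC_WAIT;
 *				// xc_sys_mutex is held on return
 *
 *	... critical work; xc_one()/xc_some()/xc_all() issued here take
 *	... the fast mailbox path, since this cpu is the xc_holder
 *
 *	xc_dismissed(cpuset);	// targets return to XC_IDLE and the
 *				// mutex is released
 */
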
/*
 * xc_serv - "x-call" handler at TL=0; serves only one x-call request;
 * runs at XCALL_PIL level.
 */
uint_t
xc_serv(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);
	CPUSET_ZERO(tset);
	CPUSET_ADD(tset, lcx);
	flush_windows();
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_DOIT);
	func = xmp->xc_func;
	XC_TRACE(XC_SERV, &tset, func, xmp->xc_arg1, xmp->xc_arg2);
	if (func != NULL) {
		arg1 = xmp->xc_arg1;
		arg2 = xmp->xc_arg2;
		(*func)(arg1, arg2);
	}
	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}

/*
 * if == 1, an xc_loop timeout will cause a panic;
 * otherwise print a warning
 */
uint_t xc_loop_panic = 0;

/*
 * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
 * session, or serves multiple x-call requests; runs at XCALL_PIL level.
 */
uint_t
xc_loop(void)
{
	int lcx = (int)(CPU->cpu_id);
	struct xc_mbox *xmp;
	xcfunc_t *func;
	uint64_t arg1, arg2;
	uint64_t loop_cnt = 0;
	cpuset_t tset;

	ASSERT(getpil() == XCALL_PIL);

	CPUSET_ZERO(tset);
	flush_windows();

	/*
	 * Someone must have owned the xc_sys_mutex;
	 * no further interrupt (at XCALL_PIL or below) can
	 * be taken by this processor until xc_loop exits.
	 *
	 * The owner of xc_sys_mutex (or xc_holder) can expect
	 * its xc/xt requests are handled as follows:
	 *	xc requests use xc_mbox's handshaking for their services
	 *	xt requests at TL>0 will be handled immediately
	 *	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *		they will be handled after xc_loop exits
	 *		(so, they probably should not be used)
	 *		else they will be handled immediately
	 *
	 * For those who are not informed to enter xc_loop, if they
	 * send xc/xt requests to this processor at this moment,
	 * the requests will be handled as follows:
	 *	xc requests will be handled after they grab xc_sys_mutex
	 *	xt requests at TL>0 will be handled immediately
	 *	xt requests at TL=0:
	 *		if their handlers' pils are <= XCALL_PIL, then
	 *		they will be handled after xc_loop exits
	 *		else they will be handled immediately
	 */
	xmp = &xc_mbox[lcx];
	ASSERT(lcx != xc_holder);
	ASSERT(xmp->xc_state == XC_ENTER);
	xmp->xc_state = XC_WAIT;
	CPUSET_ADD(tset, lcx);
	membar_stld();
	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
	while (xmp->xc_state != XC_EXIT) {
		if (xmp->xc_state == XC_DOIT) {
			func = xmp->xc_func;
			arg1 = xmp->xc_arg1;
			arg2 = xmp->xc_arg2;
			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
			if (func != NULL)
				(*func)(arg1, arg2);
			xmp->xc_state = XC_WAIT;
			membar_stld();
			/*
			 * reset the timeout counter
			 * since some work was done
			 */
			loop_cnt = 0;
		} else {
			/* patience is a virtue... */
			loop_cnt++;
		}

		if (loop_cnt > xc_func_time_limit) {
			if (sendmondo_in_recover) {
				drv_usecwait(1);
				loop_cnt = 0;
				continue;
			}
			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
			    "xc_loop() timeout");
			/*
			 * if the above displayed a warning,
			 * reset the timeout counter and be patient
			 */
			loop_cnt = 0;
		}
	}
	ASSERT(xmp->xc_state == XC_EXIT);
	ASSERT(xc_holder != -1);
	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
	xmp->xc_state = XC_IDLE;
	membar_stld();
	return (1);
}