17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5a1af7ba0Scwb * Common Development and Distribution License (the "License"). 6a1af7ba0Scwb * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22a1af7ba0Scwb * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 277c478bd9Sstevel@tonic-gate 287c478bd9Sstevel@tonic-gate #include <sys/types.h> 297c478bd9Sstevel@tonic-gate #include <sys/param.h> 307c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 317c478bd9Sstevel@tonic-gate #include <sys/systm.h> 327c478bd9Sstevel@tonic-gate #include <sys/spl.h> 337c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 347c478bd9Sstevel@tonic-gate #include <sys/debug.h> 357c478bd9Sstevel@tonic-gate #include <sys/kdi_impl.h> 363aedfe0bSmishra #include <sys/cpuvar.h> 373aedfe0bSmishra #include <sys/cpuvar.h> 383aedfe0bSmishra #include <sys/archsystm.h> 397c478bd9Sstevel@tonic-gate 407c478bd9Sstevel@tonic-gate /* 417c478bd9Sstevel@tonic-gate * Handle software interrupts through 'softcall' mechanism 42f8047eabSsudheer * 43f8047eabSsudheer * At present softcall mechanism uses a global list headed by softhead. 44f8047eabSsudheer * Entries are added to tail and removed from head so as to preserve FIFO 45f8047eabSsudheer * nature of entries in the softcall list. softcall() takes care of adding 46f8047eabSsudheer * entries to the softtail. 47f8047eabSsudheer * 48f8047eabSsudheer * softint must take care of executing the entries in the FIFO 49f8047eabSsudheer * order. It could be called simultaneously from multiple cpus, however only 503aedfe0bSmishra * one instance of softint should process the softcall list with the exception 513aedfe0bSmishra * when CPU is stuck due to high interrupt load and can't execute callbacks. 523aedfe0bSmishra * State diagram is as follows :- 533aedfe0bSmishra * 543aedfe0bSmishra * - Upper half which is same as old state machine 55f8047eabSsudheer * (IDLE->PEND->DRAIN->IDLE) 56f8047eabSsudheer * 573aedfe0bSmishra * - Lower half which steals the entries from softcall queue and execute 583aedfe0bSmishra * in the context of softint interrupt handler. 
The interrupt handler
 * is fired on a different CPU by sending a cross-call.
 *
 * Starting state is IDLE.
 *
 *                            softint()
 *
 *
 *                              (c)
 *    ____________________________________________________
 *   |                                  ^               ^
 *   v              (a)                 |      (b)      |
 * IDLE--------------------->PEND--------------------->DRAIN
 *   ^                        |                         |
 *   |                        |                         |
 *   |                        |                         |
 *   |                        |                         |
 *   |                        |                         |
 *   |                        d                         d
 *   |                        |                         |
 *   |                        v                         v
 *   |                       PEND                     DRAIN
 *   |          (e)           &                         &
 *   |<-----------------------STEAL                   STEAL
 *   ^                                                  |
 *   |                                                  |
 *   |          (e)                                     v
 *   |_________________________<________________________|
 *
 *
 *
 * Edges (a)->(b)->(c) are the same as the old state machine and these
 * are mutually exclusive states.
 *
 * a - When an entry is being enqueued to the softcall queue then the state
 *     moves from IDLE to PEND.
 *
 * b - When the interrupt handler has started processing the softcall queue.
 *
 * c - When the interrupt handler finished processing the softcall queue, the
 *     state machine goes back to IDLE.
 *
 * d - softcall() generates another softlevel1 iff the interrupt handler
 *     hasn't run recently.
 *
 * e - Either PEND|STEAL or DRAIN|STEAL is set. We let the softlevel1
 *     handler exit because we have processed all the entries.
1053aedfe0bSmishra * 1063aedfe0bSmishra * When CPU is being pinned by higher level interrupts for more than 1073aedfe0bSmishra * softcall_delay clock ticks, SOFT_STEAL is OR'ed so that softlevel1 1083aedfe0bSmishra * handler on the other CPU can drain the queue. 1093aedfe0bSmishra * 110f8047eabSsudheer * These states are needed for softcall mechanism since Solaris has only 1113aedfe0bSmishra * one interface (ie. siron ) as of now for : 1123aedfe0bSmishra * 113f8047eabSsudheer * - raising a soft interrupt architecture independently (ie not through 114f8047eabSsudheer * setsoftint(..) ) 115f8047eabSsudheer * - to process the softcall queue. 1167c478bd9Sstevel@tonic-gate */ 1177c478bd9Sstevel@tonic-gate 1187c478bd9Sstevel@tonic-gate #define NSOFTCALLS 200 1193aedfe0bSmishra 120f8047eabSsudheer /* 121f8047eabSsudheer * Defined states for softcall processing. 122f8047eabSsudheer */ 123f8047eabSsudheer #define SOFT_IDLE 0x01 /* no processing is needed */ 124f8047eabSsudheer #define SOFT_PEND 0x02 /* softcall list needs processing */ 1253aedfe0bSmishra #define SOFT_DRAIN 0x04 /* list is being processed */ 1263aedfe0bSmishra #define SOFT_STEAL 0x08 /* list is being stolen for draining */ 1277c478bd9Sstevel@tonic-gate 1287c478bd9Sstevel@tonic-gate typedef struct softcall { 1297c478bd9Sstevel@tonic-gate void (*sc_func)(void *); /* function to call */ 1307c478bd9Sstevel@tonic-gate void *sc_arg; /* arg to pass to func */ 1317c478bd9Sstevel@tonic-gate struct softcall *sc_next; /* next in list */ 1327c478bd9Sstevel@tonic-gate } softcall_t; 1337c478bd9Sstevel@tonic-gate 1343aedfe0bSmishra /* 1353aedfe0bSmishra * softcall list and state variables. 
1363aedfe0bSmishra */ 1373aedfe0bSmishra static softcall_t *softcalls; 1383aedfe0bSmishra static softcall_t *softhead, *softtail, *softfree; 139f8047eabSsudheer static uint_t softcall_state; 1403aedfe0bSmishra static clock_t softcall_tick; 141*b9bc7f78Ssmaybe static clock_t softcall_countstart, softcall_lastpoke; 142*b9bc7f78Ssmaybe static uint_t softcall_pokecount; 143*b9bc7f78Ssmaybe 144*b9bc7f78Ssmaybe /* 145*b9bc7f78Ssmaybe * Max number of pokes per second before increasing softcall_delay 146*b9bc7f78Ssmaybe */ 147*b9bc7f78Ssmaybe uint_t softcall_pokemax = 10; 1483aedfe0bSmishra 1493aedfe0bSmishra /* 1503aedfe0bSmishra * This ensures that softcall entries don't get stuck for long. It's expressed 1513aedfe0bSmishra * in 10 milliseconds as 1 unit. When hires_tick is set or other clock frequency 1523aedfe0bSmishra * is used, softcall_init() ensures that it's still expressed as 1 = 10 milli 1533aedfe0bSmishra * seconds. 1543aedfe0bSmishra */ 155*b9bc7f78Ssmaybe unsigned int softcall_delay = 1; 1563aedfe0bSmishra 1573aedfe0bSmishra /* 1583aedfe0bSmishra * The last CPU which will drain softcall queue. 1593aedfe0bSmishra */ 1603aedfe0bSmishra static int softcall_latest_cpuid = -1; 1613aedfe0bSmishra 1623aedfe0bSmishra /* 1633aedfe0bSmishra * CPUSET to hold the CPU which is processing softcall queue 1643aedfe0bSmishra * currently. There can be more than one CPU having bit set 1653aedfe0bSmishra * but it will happen only when they are stuck. 1663aedfe0bSmishra */ 1673aedfe0bSmishra static cpuset_t *softcall_cpuset = NULL; 1687c478bd9Sstevel@tonic-gate 169f8047eabSsudheer /* 170f8047eabSsudheer * protects softcall lists and control variable softcall_state. 
171f8047eabSsudheer */ 172f8047eabSsudheer static kmutex_t softcall_lock; 1737c478bd9Sstevel@tonic-gate 1747c478bd9Sstevel@tonic-gate static void (*kdi_softcall_func)(void); 1753aedfe0bSmishra extern void siron_poke_cpu(cpuset_t); 1767c478bd9Sstevel@tonic-gate 1777c478bd9Sstevel@tonic-gate extern void siron(void); 178f1fa5dcfSmishra extern void kdi_siron(void); 1797c478bd9Sstevel@tonic-gate 180*b9bc7f78Ssmaybe 1817c478bd9Sstevel@tonic-gate void 1827c478bd9Sstevel@tonic-gate softcall_init(void) 1837c478bd9Sstevel@tonic-gate { 1847c478bd9Sstevel@tonic-gate softcall_t *sc; 1857c478bd9Sstevel@tonic-gate 1863aedfe0bSmishra softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP); 1873aedfe0bSmishra softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP); 1887c478bd9Sstevel@tonic-gate for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) { 1897c478bd9Sstevel@tonic-gate sc->sc_next = softfree; 1907c478bd9Sstevel@tonic-gate softfree = sc; 1917c478bd9Sstevel@tonic-gate } 1923aedfe0bSmishra mutex_init(&softcall_lock, NULL, MUTEX_SPIN, 1933aedfe0bSmishra (void *)ipltospl(SPL8)); 1943aedfe0bSmishra softcall_state = SOFT_IDLE; 1953aedfe0bSmishra softcall_tick = lbolt; 1963aedfe0bSmishra 1973aedfe0bSmishra /* 1983aedfe0bSmishra * Since softcall_delay is expressed as 1 = 10 milliseconds. 1993aedfe0bSmishra */ 2003aedfe0bSmishra softcall_delay = softcall_delay * (hz/100); 2013aedfe0bSmishra CPUSET_ZERO(*softcall_cpuset); 2023aedfe0bSmishra } 2033aedfe0bSmishra 2043aedfe0bSmishra /* 2053aedfe0bSmishra * Gets called when softcall queue is not moving forward. We choose 2063aedfe0bSmishra * a CPU and poke except the ones which are already poked. 
2073aedfe0bSmishra */ 2083aedfe0bSmishra static int 2093aedfe0bSmishra softcall_choose_cpu() 2103aedfe0bSmishra { 2113aedfe0bSmishra cpu_t *cplist = CPU; 2123aedfe0bSmishra cpu_t *cp; 2133aedfe0bSmishra int intr_load = INT_MAX; 2143aedfe0bSmishra int cpuid = -1; 2153aedfe0bSmishra cpuset_t poke; 2163aedfe0bSmishra int s; 2173aedfe0bSmishra 2183aedfe0bSmishra ASSERT(getpil() >= DISP_LEVEL); 2193aedfe0bSmishra ASSERT(ncpus > 1); 2203aedfe0bSmishra ASSERT(MUTEX_HELD(&softcall_lock)); 2213aedfe0bSmishra 2223aedfe0bSmishra CPUSET_ZERO(poke); 2233aedfe0bSmishra 2243aedfe0bSmishra /* 2253aedfe0bSmishra * The hint is to start from current CPU. 2263aedfe0bSmishra */ 2273aedfe0bSmishra cp = cplist; 2283aedfe0bSmishra do { 229f1fa5dcfSmishra /* 230f1fa5dcfSmishra * Don't select this CPU if : 231f1fa5dcfSmishra * - in cpuset already 232f1fa5dcfSmishra * - CPU is not accepting interrupts 233f1fa5dcfSmishra * - CPU is being offlined 234f1fa5dcfSmishra */ 2353aedfe0bSmishra if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) || 236f1fa5dcfSmishra (cp->cpu_flags & CPU_ENABLE) == 0 || 237f1fa5dcfSmishra (cp == cpu_inmotion)) 2383aedfe0bSmishra continue; 239*b9bc7f78Ssmaybe #if defined(__x86) 240*b9bc7f78Ssmaybe /* 241*b9bc7f78Ssmaybe * Don't select this CPU if a hypervisor indicates it 242*b9bc7f78Ssmaybe * isn't currently scheduled onto a physical cpu. We are 243*b9bc7f78Ssmaybe * looking for a cpu that can respond quickly and the time 244*b9bc7f78Ssmaybe * to get the virtual cpu scheduled and switched to running 245*b9bc7f78Ssmaybe * state is likely to be relatively lengthy. 
246*b9bc7f78Ssmaybe */ 247*b9bc7f78Ssmaybe if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU) 248*b9bc7f78Ssmaybe continue; 249*b9bc7f78Ssmaybe #endif /* __x86 */ 2503aedfe0bSmishra 2513aedfe0bSmishra /* if CPU is not busy */ 2523aedfe0bSmishra if (cp->cpu_intrload == 0) { 2533aedfe0bSmishra cpuid = cp->cpu_id; 2543aedfe0bSmishra break; 2553aedfe0bSmishra } 2563aedfe0bSmishra 2573aedfe0bSmishra if (cp->cpu_intrload < intr_load) { 2583aedfe0bSmishra cpuid = cp->cpu_id; 2593aedfe0bSmishra intr_load = cp->cpu_intrload; 2603aedfe0bSmishra } else if (cp->cpu_intrload == intr_load) { 2613aedfe0bSmishra /* 2623aedfe0bSmishra * We want to poke CPUs having similar 2633aedfe0bSmishra * load because we don't know which CPU is 2643aedfe0bSmishra * can acknowledge level1 interrupt. The 2653aedfe0bSmishra * list of such CPUs should not be large. 2663aedfe0bSmishra */ 2673aedfe0bSmishra if (cpuid != -1) { 2683aedfe0bSmishra /* 2693aedfe0bSmishra * Put the last CPU chosen because 2703aedfe0bSmishra * it also has same interrupt load. 2713aedfe0bSmishra */ 2723aedfe0bSmishra CPUSET_ADD(poke, cpuid); 2733aedfe0bSmishra cpuid = -1; 2743aedfe0bSmishra } 2753aedfe0bSmishra 2763aedfe0bSmishra CPUSET_ADD(poke, cp->cpu_id); 2773aedfe0bSmishra } 2783aedfe0bSmishra } while ((cp = cp->cpu_next_onln) != cplist); 2793aedfe0bSmishra 2803aedfe0bSmishra /* if we found a CPU which suits best to poke */ 2813aedfe0bSmishra if (cpuid != -1) { 2823aedfe0bSmishra CPUSET_ZERO(poke); 2833aedfe0bSmishra CPUSET_ADD(poke, cpuid); 2843aedfe0bSmishra } 2853aedfe0bSmishra 2863aedfe0bSmishra if (CPUSET_ISNULL(poke)) { 2873aedfe0bSmishra mutex_exit(&softcall_lock); 2883aedfe0bSmishra return (0); 2893aedfe0bSmishra } 2903aedfe0bSmishra 2913aedfe0bSmishra /* 2923aedfe0bSmishra * We first set the bit in cpuset and then poke. 
2933aedfe0bSmishra */ 2943aedfe0bSmishra CPUSET_XOR(*softcall_cpuset, poke); 2953aedfe0bSmishra mutex_exit(&softcall_lock); 2963aedfe0bSmishra 2973aedfe0bSmishra /* 2983aedfe0bSmishra * If softcall() was called at low pil then we may 2993aedfe0bSmishra * get preempted before we raise PIL. It should be okay 3003aedfe0bSmishra * because we are just going to poke CPUs now or at most 3013aedfe0bSmishra * another thread may start choosing CPUs in this routine. 3023aedfe0bSmishra */ 3033aedfe0bSmishra s = splhigh(); 3043aedfe0bSmishra siron_poke_cpu(poke); 3053aedfe0bSmishra splx(s); 3063aedfe0bSmishra return (1); 3077c478bd9Sstevel@tonic-gate } 3087c478bd9Sstevel@tonic-gate 309*b9bc7f78Ssmaybe 3107c478bd9Sstevel@tonic-gate /* 3117c478bd9Sstevel@tonic-gate * Call function func with argument arg 3127c478bd9Sstevel@tonic-gate * at some later time at software interrupt priority 3137c478bd9Sstevel@tonic-gate */ 3147c478bd9Sstevel@tonic-gate void 3157c478bd9Sstevel@tonic-gate softcall(void (*func)(void *), void *arg) 3167c478bd9Sstevel@tonic-gate { 3177c478bd9Sstevel@tonic-gate softcall_t *sc; 318*b9bc7f78Ssmaybe clock_t w, now; 3197c478bd9Sstevel@tonic-gate 3207c478bd9Sstevel@tonic-gate /* 3217c478bd9Sstevel@tonic-gate * protect against cross-calls 3227c478bd9Sstevel@tonic-gate */ 3237c478bd9Sstevel@tonic-gate mutex_enter(&softcall_lock); 3247c478bd9Sstevel@tonic-gate /* coalesce identical softcalls */ 3257c478bd9Sstevel@tonic-gate for (sc = softhead; sc != 0; sc = sc->sc_next) { 3267c478bd9Sstevel@tonic-gate if (sc->sc_func == func && sc->sc_arg == arg) { 3273aedfe0bSmishra goto intr; 3287c478bd9Sstevel@tonic-gate } 3297c478bd9Sstevel@tonic-gate } 3307c478bd9Sstevel@tonic-gate 3317c478bd9Sstevel@tonic-gate if ((sc = softfree) == 0) 3327c478bd9Sstevel@tonic-gate panic("too many softcalls"); 3333aedfe0bSmishra 3347c478bd9Sstevel@tonic-gate softfree = sc->sc_next; 3357c478bd9Sstevel@tonic-gate sc->sc_func = func; 3367c478bd9Sstevel@tonic-gate sc->sc_arg = arg; 
3377c478bd9Sstevel@tonic-gate sc->sc_next = 0; 3387c478bd9Sstevel@tonic-gate 3397c478bd9Sstevel@tonic-gate if (softhead) { 3407c478bd9Sstevel@tonic-gate softtail->sc_next = sc; 3417c478bd9Sstevel@tonic-gate softtail = sc; 3423aedfe0bSmishra } else 3437c478bd9Sstevel@tonic-gate softhead = softtail = sc; 3443aedfe0bSmishra 3453aedfe0bSmishra intr: 3463aedfe0bSmishra if (softcall_state & SOFT_IDLE) { 347f8047eabSsudheer softcall_state = SOFT_PEND; 3483aedfe0bSmishra softcall_tick = lbolt; 3497c478bd9Sstevel@tonic-gate mutex_exit(&softcall_lock); 3507c478bd9Sstevel@tonic-gate siron(); 3513aedfe0bSmishra } else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) { 352*b9bc7f78Ssmaybe now = lbolt; 353*b9bc7f78Ssmaybe w = now - softcall_tick; 3543aedfe0bSmishra if (w <= softcall_delay || ncpus == 1) { 3553aedfe0bSmishra mutex_exit(&softcall_lock); 3563aedfe0bSmishra return; 3577c478bd9Sstevel@tonic-gate } 358*b9bc7f78Ssmaybe /* 359*b9bc7f78Ssmaybe * Did we poke less than a second ago? 360*b9bc7f78Ssmaybe */ 361*b9bc7f78Ssmaybe if (now - softcall_lastpoke < hz) { 362*b9bc7f78Ssmaybe /* 363*b9bc7f78Ssmaybe * We did, increment the poke count and 364*b9bc7f78Ssmaybe * see if we are poking too often 365*b9bc7f78Ssmaybe */ 366*b9bc7f78Ssmaybe if (softcall_pokecount++ == 0) 367*b9bc7f78Ssmaybe softcall_countstart = now; 368*b9bc7f78Ssmaybe if (softcall_pokecount > softcall_pokemax) { 369*b9bc7f78Ssmaybe /* 370*b9bc7f78Ssmaybe * If poking too much increase the delay 371*b9bc7f78Ssmaybe */ 372*b9bc7f78Ssmaybe if (now - softcall_countstart <= hz) 373*b9bc7f78Ssmaybe softcall_delay++; 374*b9bc7f78Ssmaybe softcall_pokecount = 0; 375*b9bc7f78Ssmaybe } 376*b9bc7f78Ssmaybe } else { 377*b9bc7f78Ssmaybe /* 378*b9bc7f78Ssmaybe * poke rate has dropped off, reset the poke monitor 379*b9bc7f78Ssmaybe */ 380*b9bc7f78Ssmaybe softcall_pokecount = 0; 381*b9bc7f78Ssmaybe } 382*b9bc7f78Ssmaybe softcall_lastpoke = lbolt; 3833aedfe0bSmishra if (!(softcall_state & SOFT_STEAL)) { 3843aedfe0bSmishra 
softcall_state |= SOFT_STEAL; 3853aedfe0bSmishra 3863aedfe0bSmishra /* 3873aedfe0bSmishra * We want to give some more chance before 3883aedfe0bSmishra * fishing around again. 3893aedfe0bSmishra */ 3903aedfe0bSmishra softcall_tick = lbolt; 3913aedfe0bSmishra } 3923aedfe0bSmishra 3933aedfe0bSmishra /* softcall_lock will be released by this routine */ 3943aedfe0bSmishra (void) softcall_choose_cpu(); 3957c478bd9Sstevel@tonic-gate } 396f8047eabSsudheer } 3977c478bd9Sstevel@tonic-gate 3987c478bd9Sstevel@tonic-gate void 3997c478bd9Sstevel@tonic-gate kdi_softcall(void (*func)(void)) 4007c478bd9Sstevel@tonic-gate { 4017c478bd9Sstevel@tonic-gate kdi_softcall_func = func; 4027c478bd9Sstevel@tonic-gate 4037c478bd9Sstevel@tonic-gate if (softhead == NULL) 404f1fa5dcfSmishra kdi_siron(); 4057c478bd9Sstevel@tonic-gate } 4067c478bd9Sstevel@tonic-gate 4077c478bd9Sstevel@tonic-gate /* 408f8047eabSsudheer * Called to process software interrupts take one off queue, call it, 409f8047eabSsudheer * repeat. 410f8047eabSsudheer * 4113aedfe0bSmishra * Note queue may change during call; softcall_lock, state variables 4123aedfe0bSmishra * softcall_state and softcall_latest_cpuid ensures that - 413f8047eabSsudheer * - we don't have multiple cpus pulling from the list (thus causing 4143aedfe0bSmishra * a violation of FIFO order with an exception when we are stuck). 415f8047eabSsudheer * - we don't miss a new entry having been added to the head. 416f8047eabSsudheer * - we don't miss a wakeup. 4177c478bd9Sstevel@tonic-gate */ 418f8047eabSsudheer 4197c478bd9Sstevel@tonic-gate void 4207c478bd9Sstevel@tonic-gate softint(void) 4217c478bd9Sstevel@tonic-gate { 4223aedfe0bSmishra softcall_t *sc = NULL; 4237c478bd9Sstevel@tonic-gate void (*func)(); 4247c478bd9Sstevel@tonic-gate caddr_t arg; 4253aedfe0bSmishra int cpu_id = CPU->cpu_id; 4267c478bd9Sstevel@tonic-gate 427f1fa5dcfSmishra /* 428f1fa5dcfSmishra * Don't process softcall queue if current CPU is quiesced or 429f1fa5dcfSmishra * offlined. 
This can happen when a CPU is running pause 430f1fa5dcfSmishra * thread but softcall already sent a xcall. 431f1fa5dcfSmishra */ 432f1fa5dcfSmishra if (CPU->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) { 433f1fa5dcfSmishra if (softcall_cpuset != NULL && 434f1fa5dcfSmishra CPU_IN_SET(*softcall_cpuset, cpu_id)) { 435f1fa5dcfSmishra CPUSET_DEL(*softcall_cpuset, cpu_id); 436f1fa5dcfSmishra goto out; 437f1fa5dcfSmishra } 438f1fa5dcfSmishra } 439f1fa5dcfSmishra 4407c478bd9Sstevel@tonic-gate mutex_enter(&softcall_lock); 4413aedfe0bSmishra 4423aedfe0bSmishra if (softcall_state & (SOFT_STEAL|SOFT_PEND)) { 4433aedfe0bSmishra softcall_state = SOFT_DRAIN; 4443aedfe0bSmishra } else { 4453aedfe0bSmishra /* 4463aedfe0bSmishra * The check for softcall_cpuset being 4473aedfe0bSmishra * NULL is required because it may get 4483aedfe0bSmishra * called very early during boot. 4493aedfe0bSmishra */ 4503aedfe0bSmishra if (softcall_cpuset != NULL && 4513aedfe0bSmishra CPU_IN_SET(*softcall_cpuset, cpu_id)) 4523aedfe0bSmishra CPUSET_DEL(*softcall_cpuset, cpu_id); 453f8047eabSsudheer mutex_exit(&softcall_lock); 454f8047eabSsudheer goto out; 455f8047eabSsudheer } 4563aedfe0bSmishra 4573aedfe0bSmishra /* 4583aedfe0bSmishra * Setting softcall_latest_cpuid to current CPU ensures 4593aedfe0bSmishra * that there is only one active softlevel1 handler to 4603aedfe0bSmishra * process softcall queues. 4613aedfe0bSmishra * 4623aedfe0bSmishra * Since softcall_lock lock is dropped before calling 4633aedfe0bSmishra * func (callback), we need softcall_latest_cpuid 4643aedfe0bSmishra * to prevent two softlevel1 hanlders working on the 4653aedfe0bSmishra * queue when the first softlevel1 handler gets 4663aedfe0bSmishra * stuck due to high interrupt load. 
4673aedfe0bSmishra */ 4683aedfe0bSmishra softcall_latest_cpuid = cpu_id; 4693aedfe0bSmishra 4703aedfe0bSmishra /* add ourself to the cpuset */ 4713aedfe0bSmishra if (!CPU_IN_SET(*softcall_cpuset, cpu_id)) 4723aedfe0bSmishra CPUSET_ADD(*softcall_cpuset, cpu_id); 473f8047eabSsudheer 474f8047eabSsudheer for (;;) { 4753aedfe0bSmishra softcall_tick = lbolt; 4767c478bd9Sstevel@tonic-gate if ((sc = softhead) != NULL) { 4777c478bd9Sstevel@tonic-gate func = sc->sc_func; 4787c478bd9Sstevel@tonic-gate arg = sc->sc_arg; 4797c478bd9Sstevel@tonic-gate softhead = sc->sc_next; 4807c478bd9Sstevel@tonic-gate sc->sc_next = softfree; 4817c478bd9Sstevel@tonic-gate softfree = sc; 4827c478bd9Sstevel@tonic-gate } 4833aedfe0bSmishra 484f8047eabSsudheer if (sc == NULL) { 4853aedfe0bSmishra if (CPU_IN_SET(*softcall_cpuset, cpu_id)) 4863aedfe0bSmishra CPUSET_DEL(*softcall_cpuset, cpu_id); 4873aedfe0bSmishra 488f8047eabSsudheer softcall_state = SOFT_IDLE; 4893aedfe0bSmishra ASSERT(softcall_latest_cpuid == cpu_id); 4903aedfe0bSmishra softcall_latest_cpuid = -1; 4913aedfe0bSmishra 4927c478bd9Sstevel@tonic-gate mutex_exit(&softcall_lock); 4937c478bd9Sstevel@tonic-gate break; 4947c478bd9Sstevel@tonic-gate } 4953aedfe0bSmishra 496f8047eabSsudheer mutex_exit(&softcall_lock); 497f8047eabSsudheer func(arg); 498f8047eabSsudheer mutex_enter(&softcall_lock); 4993aedfe0bSmishra 5003aedfe0bSmishra /* 5013aedfe0bSmishra * No longer need softcall processing from current 5023aedfe0bSmishra * interrupt handler because either 5033aedfe0bSmishra * (a) softcall is in SOFT_IDLE state or 5043aedfe0bSmishra * (b) There is a CPU already draining softcall 5053aedfe0bSmishra * queue and the current softlevel1 is no 5063aedfe0bSmishra * longer required. 
5073aedfe0bSmishra */ 5083aedfe0bSmishra if (softcall_latest_cpuid != cpu_id) { 5093aedfe0bSmishra if (CPU_IN_SET(*softcall_cpuset, cpu_id)) 5103aedfe0bSmishra CPUSET_DEL(*softcall_cpuset, cpu_id); 5113aedfe0bSmishra 5123aedfe0bSmishra mutex_exit(&softcall_lock); 5133aedfe0bSmishra break; 514f8047eabSsudheer } 5153aedfe0bSmishra } 5163aedfe0bSmishra 517f8047eabSsudheer out: 5187c478bd9Sstevel@tonic-gate if ((func = kdi_softcall_func) != NULL) { 5197c478bd9Sstevel@tonic-gate kdi_softcall_func = NULL; 5207c478bd9Sstevel@tonic-gate func(); 5217c478bd9Sstevel@tonic-gate } 5227c478bd9Sstevel@tonic-gate } 523