17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5a1af7ba0Scwb * Common Development and Distribution License (the "License").
6a1af7ba0Scwb * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
217c478bd9Sstevel@tonic-gate /*
22*d3d50737SRafael Vanoni * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
237c478bd9Sstevel@tonic-gate * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate */
257c478bd9Sstevel@tonic-gate
267c478bd9Sstevel@tonic-gate #include <sys/types.h>
277c478bd9Sstevel@tonic-gate #include <sys/param.h>
287c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
297c478bd9Sstevel@tonic-gate #include <sys/systm.h>
307c478bd9Sstevel@tonic-gate #include <sys/spl.h>
317c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
327c478bd9Sstevel@tonic-gate #include <sys/debug.h>
337c478bd9Sstevel@tonic-gate #include <sys/kdi_impl.h>
343aedfe0bSmishra #include <sys/cpuvar.h>
353aedfe0bSmishra #include <sys/cpuvar.h>
363aedfe0bSmishra #include <sys/archsystm.h>
377c478bd9Sstevel@tonic-gate
387c478bd9Sstevel@tonic-gate /*
397c478bd9Sstevel@tonic-gate * Handle software interrupts through 'softcall' mechanism
40f8047eabSsudheer *
41f8047eabSsudheer * At present softcall mechanism uses a global list headed by softhead.
42f8047eabSsudheer * Entries are added to tail and removed from head so as to preserve FIFO
43f8047eabSsudheer * nature of entries in the softcall list. softcall() takes care of adding
44f8047eabSsudheer * entries to the softtail.
45f8047eabSsudheer *
46f8047eabSsudheer * softint must take care of executing the entries in the FIFO
47f8047eabSsudheer * order. It could be called simultaneously from multiple cpus, however only
483aedfe0bSmishra * one instance of softint should process the softcall list with the exception
493aedfe0bSmishra * when CPU is stuck due to high interrupt load and can't execute callbacks.
503aedfe0bSmishra * State diagram is as follows :-
513aedfe0bSmishra *
523aedfe0bSmishra * - Upper half which is same as old state machine
53f8047eabSsudheer * (IDLE->PEND->DRAIN->IDLE)
54f8047eabSsudheer *
553aedfe0bSmishra * - Lower half which steals the entries from softcall queue and execute
563aedfe0bSmishra * in the context of softint interrupt handler. The interrupt handler
573aedfe0bSmishra * is fired on a different CPU by sending a cross-call.
583aedfe0bSmishra *
593aedfe0bSmishra * Starting state is IDLE.
603aedfe0bSmishra *
613aedfe0bSmishra * softint()
623aedfe0bSmishra *
633aedfe0bSmishra *
643aedfe0bSmishra * (c)
653aedfe0bSmishra * ____________________________________________________
663aedfe0bSmishra * | ^ ^
673aedfe0bSmishra * v (a) | (b) |
683aedfe0bSmishra * IDLE--------------------->PEND--------------------->DRAIN
693aedfe0bSmishra * ^ | |
703aedfe0bSmishra * | | |
713aedfe0bSmishra * | | |
723aedfe0bSmishra * | | |
733aedfe0bSmishra * | | |
743aedfe0bSmishra * | d d
753aedfe0bSmishra * | | |
763aedfe0bSmishra * | v v
773aedfe0bSmishra * | PEND DRAIN
783aedfe0bSmishra * | (e) & &
793aedfe0bSmishra * |<-----------------------STEAL STEAL
803aedfe0bSmishra * ^ |
813aedfe0bSmishra * | |
823aedfe0bSmishra * | (e) v
833aedfe0bSmishra * |_________________________<__________________________|
843aedfe0bSmishra *
853aedfe0bSmishra *
863aedfe0bSmishra *
873aedfe0bSmishra * Edge (a)->(b)->(c) are same as old state machine and these
883aedfe0bSmishra * are mutually exclusive state.
893aedfe0bSmishra *
903aedfe0bSmishra * a - When an entry is being enqueued to softcall queue then the state
913aedfe0bSmishra * moves from IDLE to PEND.
923aedfe0bSmishra *
933aedfe0bSmishra * b - When interrupt handler has started processing softcall queue.
943aedfe0bSmishra *
953aedfe0bSmishra * c - When interrupt handler finished processing softcall queue, the
963aedfe0bSmishra * state of machines goes back to IDLE.
973aedfe0bSmishra *
983aedfe0bSmishra * d - softcall() generates another softlevel1 iff interrupt handler
993aedfe0bSmishra * hasn't run recently.
1003aedfe0bSmishra *
1013aedfe0bSmishra * e - Either PEND|STEAL or DRAIN|STEAL is set. We let softlevel1
1023aedfe0bSmishra * handler exit because we have processed all the entries.
1033aedfe0bSmishra *
1043aedfe0bSmishra * When CPU is being pinned by higher level interrupts for more than
1053aedfe0bSmishra * softcall_delay clock ticks, SOFT_STEAL is OR'ed so that softlevel1
1063aedfe0bSmishra * handler on the other CPU can drain the queue.
1073aedfe0bSmishra *
 * These states are needed by the softcall mechanism because Solaris
 * currently has only one interface (i.e. siron) for:
 *
 * - raising a soft interrupt architecture-independently (i.e. not through
 *   setsoftint(..))
 * - processing the softcall queue.
1147c478bd9Sstevel@tonic-gate */
1157c478bd9Sstevel@tonic-gate
1167c478bd9Sstevel@tonic-gate #define NSOFTCALLS 200
1173aedfe0bSmishra
118f8047eabSsudheer /*
119f8047eabSsudheer * Defined states for softcall processing.
120f8047eabSsudheer */
121f8047eabSsudheer #define SOFT_IDLE 0x01 /* no processing is needed */
122f8047eabSsudheer #define SOFT_PEND 0x02 /* softcall list needs processing */
1233aedfe0bSmishra #define SOFT_DRAIN 0x04 /* list is being processed */
1243aedfe0bSmishra #define SOFT_STEAL 0x08 /* list is being stolen for draining */
1257c478bd9Sstevel@tonic-gate
1267c478bd9Sstevel@tonic-gate typedef struct softcall {
1277c478bd9Sstevel@tonic-gate void (*sc_func)(void *); /* function to call */
1287c478bd9Sstevel@tonic-gate void *sc_arg; /* arg to pass to func */
1297c478bd9Sstevel@tonic-gate struct softcall *sc_next; /* next in list */
1307c478bd9Sstevel@tonic-gate } softcall_t;
1317c478bd9Sstevel@tonic-gate
1323aedfe0bSmishra /*
1333aedfe0bSmishra * softcall list and state variables.
1343aedfe0bSmishra */
1353aedfe0bSmishra static softcall_t *softcalls;
1363aedfe0bSmishra static softcall_t *softhead, *softtail, *softfree;
137f8047eabSsudheer static uint_t softcall_state;
1383aedfe0bSmishra static clock_t softcall_tick;
139b9bc7f78Ssmaybe static clock_t softcall_countstart, softcall_lastpoke;
140b9bc7f78Ssmaybe static uint_t softcall_pokecount;
141b9bc7f78Ssmaybe
142b9bc7f78Ssmaybe /*
143b9bc7f78Ssmaybe * Max number of pokes per second before increasing softcall_delay
144b9bc7f78Ssmaybe */
145b9bc7f78Ssmaybe uint_t softcall_pokemax = 10;
1463aedfe0bSmishra
1473aedfe0bSmishra /*
1483aedfe0bSmishra * This ensures that softcall entries don't get stuck for long. It's expressed
1493aedfe0bSmishra * in 10 milliseconds as 1 unit. When hires_tick is set or other clock frequency
1503aedfe0bSmishra * is used, softcall_init() ensures that it's still expressed as 1 = 10 milli
1513aedfe0bSmishra * seconds.
1523aedfe0bSmishra */
153b9bc7f78Ssmaybe unsigned int softcall_delay = 1;
1543aedfe0bSmishra
1553aedfe0bSmishra /*
1563aedfe0bSmishra * The last CPU which will drain softcall queue.
1573aedfe0bSmishra */
1583aedfe0bSmishra static int softcall_latest_cpuid = -1;
1593aedfe0bSmishra
1603aedfe0bSmishra /*
1613aedfe0bSmishra * CPUSET to hold the CPU which is processing softcall queue
1623aedfe0bSmishra * currently. There can be more than one CPU having bit set
1633aedfe0bSmishra * but it will happen only when they are stuck.
1643aedfe0bSmishra */
1653aedfe0bSmishra static cpuset_t *softcall_cpuset = NULL;
1667c478bd9Sstevel@tonic-gate
167f8047eabSsudheer /*
168f8047eabSsudheer * protects softcall lists and control variable softcall_state.
169f8047eabSsudheer */
170f8047eabSsudheer static kmutex_t softcall_lock;
1717c478bd9Sstevel@tonic-gate
1727c478bd9Sstevel@tonic-gate static void (*kdi_softcall_func)(void);
1733aedfe0bSmishra extern void siron_poke_cpu(cpuset_t);
1747c478bd9Sstevel@tonic-gate
1757c478bd9Sstevel@tonic-gate extern void siron(void);
176f1fa5dcfSmishra extern void kdi_siron(void);
1777c478bd9Sstevel@tonic-gate
178b9bc7f78Ssmaybe
1797c478bd9Sstevel@tonic-gate void
softcall_init(void)1807c478bd9Sstevel@tonic-gate softcall_init(void)
1817c478bd9Sstevel@tonic-gate {
1827c478bd9Sstevel@tonic-gate softcall_t *sc;
1837c478bd9Sstevel@tonic-gate
1843aedfe0bSmishra softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
1853aedfe0bSmishra softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
1867c478bd9Sstevel@tonic-gate for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
1877c478bd9Sstevel@tonic-gate sc->sc_next = softfree;
1887c478bd9Sstevel@tonic-gate softfree = sc;
1897c478bd9Sstevel@tonic-gate }
1903aedfe0bSmishra mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
1913aedfe0bSmishra (void *)ipltospl(SPL8));
1923aedfe0bSmishra softcall_state = SOFT_IDLE;
193*d3d50737SRafael Vanoni softcall_tick = ddi_get_lbolt();
1943aedfe0bSmishra
1953aedfe0bSmishra /*
1963aedfe0bSmishra * Since softcall_delay is expressed as 1 = 10 milliseconds.
1973aedfe0bSmishra */
1983aedfe0bSmishra softcall_delay = softcall_delay * (hz/100);
1993aedfe0bSmishra CPUSET_ZERO(*softcall_cpuset);
2003aedfe0bSmishra }
2013aedfe0bSmishra
/*
 * Gets called when the softcall queue is not moving forward, to choose
 * another CPU and poke it so its softlevel1 handler can drain the queue.
 * CPUs already poked (present in *softcall_cpuset) are skipped.
 *
 * Lock contract: must be entered with softcall_lock held; this routine
 * ALWAYS releases softcall_lock before returning.  Returns 1 if at
 * least one CPU was poked, 0 if no eligible CPU was found.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;	/* lowest interrupt load seen so far */
	int cpuid = -1;			/* single best candidate, if any */
	cpuset_t poke;			/* set of CPUs to poke */
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if :
		 *   - in cpuset already (poked earlier, still pending)
		 *   - CPU is not accepting interrupts
		 *   - CPU is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;
#if defined(__x86)
		/*
		 * Don't select this CPU if a hypervisor indicates it
		 * isn't currently scheduled onto a physical cpu. We are
		 * looking for a cpu that can respond quickly and the time
		 * to get the virtual cpu scheduled and switched to running
		 * state is likely to be relatively lengthy.
		 */
		if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU)
			continue;
#endif	/* __x86 */

		/* if CPU is not busy, take it immediately */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke all CPUs having similar
			 * load because we don't know which CPU can
			 * acknowledge the level1 interrupt. The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a single CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in cpuset and then poke, so softint() on
	 * the target can tell it was enlisted.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
3067c478bd9Sstevel@tonic-gate
307b9bc7f78Ssmaybe
/*
 * Call function func with argument arg
 * at some later time at software interrupt priority.
 *
 * Duplicate (func, arg) pairs already on the queue are coalesced.
 * Panics if the fixed pool of NSOFTCALLS entries is exhausted.
 */
void
softcall(void (*func)(void *), void *arg)
{
	softcall_t *sc;
	clock_t w, now;

	/*
	 * protect against cross-calls
	 */
	mutex_enter(&softcall_lock);
	/* coalesce identical softcalls */
	for (sc = softhead; sc != 0; sc = sc->sc_next) {
		if (sc->sc_func == func && sc->sc_arg == arg) {
			/* already queued; just ensure it gets drained */
			goto intr;
		}
	}

	if ((sc = softfree) == 0)
		panic("too many softcalls");

	/* take an entry off the free list, append it at the tail (FIFO) */
	softfree = sc->sc_next;
	sc->sc_func = func;
	sc->sc_arg = arg;
	sc->sc_next = 0;

	if (softhead) {
		softtail->sc_next = sc;
		softtail = sc;
	} else
		softhead = softtail = sc;

intr:
	if (softcall_state & SOFT_IDLE) {
		/*
		 * Nothing in flight: edge (a) of the state diagram,
		 * IDLE -> PEND, then raise softlevel1 locally.
		 */
		softcall_state = SOFT_PEND;
		softcall_tick = ddi_get_lbolt();
		mutex_exit(&softcall_lock);
		siron();
	} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
		/*
		 * A drain is pending or in progress.  If it has made
		 * progress within softcall_delay ticks, or there is no
		 * other CPU to enlist, leave it alone.
		 */
		now = ddi_get_lbolt();
		w = now - softcall_tick;
		if (w <= softcall_delay || ncpus == 1) {
			mutex_exit(&softcall_lock);
			return;
		}
		/*
		 * Did we poke less than a second ago?
		 */
		if (now - softcall_lastpoke < hz) {
			/*
			 * We did, increment the poke count and
			 * see if we are poking too often
			 */
			if (softcall_pokecount++ == 0)
				softcall_countstart = now;
			if (softcall_pokecount > softcall_pokemax) {
				/*
				 * If poking too much increase the delay
				 */
				if (now - softcall_countstart <= hz)
					softcall_delay++;
				softcall_pokecount = 0;
			}
		} else {
			/*
			 * poke rate has dropped off, reset the poke monitor
			 */
			softcall_pokecount = 0;
		}
		softcall_lastpoke = now;
		if (!(softcall_state & SOFT_STEAL)) {
			/* edge (d): ask another CPU to steal the queue */
			softcall_state |= SOFT_STEAL;

			/*
			 * We want to give some more chance before
			 * fishing around again.
			 */
			softcall_tick = now;
		}

		/* softcall_lock will be released by this routine */
		(void) softcall_choose_cpu();
	}
}
3957c478bd9Sstevel@tonic-gate
3967c478bd9Sstevel@tonic-gate void
kdi_softcall(void (* func)(void))3977c478bd9Sstevel@tonic-gate kdi_softcall(void (*func)(void))
3987c478bd9Sstevel@tonic-gate {
3997c478bd9Sstevel@tonic-gate kdi_softcall_func = func;
4007c478bd9Sstevel@tonic-gate
4017c478bd9Sstevel@tonic-gate if (softhead == NULL)
402f1fa5dcfSmishra kdi_siron();
4037c478bd9Sstevel@tonic-gate }
4047c478bd9Sstevel@tonic-gate
/*
 * Called to process software interrupts: take one off queue, call it,
 * repeat.
 *
 * Note queue may change during call; softcall_lock and the state
 * variables softcall_state and softcall_latest_cpuid ensure that -
 * - we don't have multiple cpus pulling from the list (thus causing
 *   a violation of FIFO order, with an exception when we are stuck).
 * - we don't miss a new entry having been added to the head.
 * - we don't miss a wakeup.
 */

void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	/*
	 * Don't process softcall queue if current CPU is quiesced or
	 * offlined. This can happen when a CPU is running pause
	 * thread but softcall already sent a xcall.
	 */
	if (CPU->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) {
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id)) {
			CPUSET_DEL(*softcall_cpuset, cpu_id);
			goto out;
		}
	}

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		/* we own the drain now: edge (b) of the state diagram */
		softcall_state = SOFT_DRAIN;
	} else {
		/*
		 * Another CPU is draining (or the queue is idle); this
		 * handler has nothing to do.  The check for
		 * softcall_cpuset being NULL is required because it may
		 * get called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process softcall queues.
	 *
	 * Since softcall_lock lock is dropped before calling
	 * func (callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourself to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		/* record progress so softcall() doesn't poke needlessly */
		softcall_tick = ddi_get_lbolt();
		if ((sc = softhead) != NULL) {
			/* dequeue the head entry, recycle it to free list */
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			/* queue drained: edge (c), back to IDLE */
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		/* run the callback without holding the spin lock */
		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * No longer need softcall processing from current
		 * interrupt handler because either
		 *  (a) softcall is in SOFT_IDLE state or
		 *  (b) There is a CPU already draining softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	/* hand off to the kernel debugger's one-shot callback, if armed */
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}
521