xref: /titanic_54/usr/src/uts/common/os/softint.c (revision 3aedfe0b5d40c671717b8bec3135984b90d27349)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/spl.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/kdi_impl.h>
#include <sys/cpuvar.h>
#include <sys/archsystm.h>

/*
 * Handle software interrupts through the 'softcall' mechanism.
 *
 * At present the softcall mechanism uses a global list headed by softhead.
 * Entries are added at the tail and removed from the head so as to preserve
 * the FIFO order of entries in the softcall list. softcall() takes care of
 * adding entries at softtail.
 *
 * softint() must take care of executing the entries in FIFO order. It may be
 * called simultaneously from multiple CPUs, but only one instance of softint()
 * should process the softcall list, except when a CPU is stuck under high
 * interrupt load and can't execute the callbacks. The state diagram is as
 * follows:
 *
 *	- Upper half, which is the same as the old state machine
 *	  (IDLE->PEND->DRAIN->IDLE)
 *
 *	- Lower half, which steals the entries from the softcall queue and
 *	  executes them in the context of the softint interrupt handler. The
 *	  interrupt handler is fired on a different CPU by sending a cross-call.
 *
 * Starting state is IDLE.
 *
 * 				softint()
 *
 *
 *				(c)
 * 	____________________________________________________
 * 	|                          ^                         ^
 * 	v            (a)           |           (b)           |
 * 	IDLE--------------------->PEND--------------------->DRAIN
 *	^                         |                         |
 * 	|                         |                         |
 * 	|                         |                         |
 * 	|                         |                         |
 * 	|                         |                         |
 * 	|                         d                         d
 * 	|                         |                         |
 * 	|                         v                         v
 * 	|                         PEND                      DRAIN
 * 	|            (e)           &                          &
 * 	|<-----------------------STEAL                      STEAL
 * 	^                                                    |
 * 	|                                                    |
 * 	|                         (e)                        v
 * 	|_________________________<__________________________|
 *
 *
 *
 * Edges (a), (b) and (c) are the same as in the old state machine and the
 * corresponding states are mutually exclusive.
 *
 * a - An entry is enqueued on the softcall queue and the state moves
 *     from IDLE to PEND.
 *
 * b - The interrupt handler has started processing the softcall queue.
 *
 * c - The interrupt handler has finished processing the softcall queue and
 *     the state machine goes back to IDLE.
 *
 * d - softcall() generates another softlevel1 iff the interrupt handler
 *     hasn't run recently.
 *
 * e - Either PEND|STEAL or DRAIN|STEAL is set. We let the softlevel1
 *     handler exit because all the entries have been processed.
 *
 * When a CPU has been pinned by higher level interrupts for more than
 * softcall_delay clock ticks, SOFT_STEAL is OR'ed in so that a softlevel1
 * handler on another CPU can drain the queue.
 *
 * These states are needed for the softcall mechanism because Solaris
 * currently has only one interface (ie. siron()) for:
 *
 * - raising a soft interrupt architecture-independently (ie. not through
 *   setsoftint())
 * - processing the softcall queue.
 */

#define	NSOFTCALLS	200

/*
 * Defined states for softcall processing.
 */
#define	SOFT_IDLE		0x01	/* no processing is needed */
#define	SOFT_PEND		0x02	/* softcall list needs processing */
#define	SOFT_DRAIN		0x04	/* list is being processed */
#define	SOFT_STEAL		0x08	/* list is being stolen for draining */

typedef struct softcall {
	void (*sc_func)(void *);	/* function to call */
	void *sc_arg;			/* arg to pass to func */
	struct softcall *sc_next;	/* next in list */
} softcall_t;

/*
 * softcall list and state variables.
 */
static softcall_t *softcalls;
static softcall_t *softhead, *softtail, *softfree;
static uint_t	softcall_state;
static clock_t softcall_tick;

/*
 * This parameter ensures that softcall entries don't get stuck for long.
 * It is expressed in units of 10 milliseconds (1 = 10 ms). When hires_tick
 * is set or another clock frequency is used, softcall_init() rescales it so
 * that one unit still corresponds to 10 milliseconds.
 */
static int softcall_delay = 1;

/*
 * The last CPU which will drain the softcall queue.
 */
static int softcall_latest_cpuid = -1;

/*
 * CPU set holding the CPUs which are currently processing the softcall
 * queue. More than one CPU may have its bit set, but that happens only
 * when they are stuck.
 */
static cpuset_t *softcall_cpuset = NULL;

/*
 * Protects the softcall lists and the control variable softcall_state.
 */
static kmutex_t	softcall_lock;

static void (*kdi_softcall_func)(void);
extern void siron_poke_cpu(cpuset_t);

extern void siron(void);

void
softcall_init(void)
{
	softcall_t *sc;

	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
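	/* Thread all the softcall entries onto the free list. */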
	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
		sc->sc_next = softfree;
		softfree = sc;
	}
	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
	    (void *)ipltospl(SPL8));
	softcall_state = SOFT_IDLE;
	softcall_tick = lbolt;

	if (softcall_delay < 0)
		softcall_delay = 1;

	/*
	 * softcall_delay is expressed in units of 10 milliseconds;
	 * convert it to clock ticks.
	 */
	softcall_delay = softcall_delay * (hz/100);
	CPUSET_ZERO(*softcall_cpuset);
}

/*
 * Gets called when the softcall queue is not moving forward. We choose
 * a CPU (or CPUs) to poke, excluding those which have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from the current CPU.
	 */
	cp = cplist;
	do {
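		/*
		 * Skip CPUs which have already been poked or which are
		 * not enabled to take interrupts.
		 */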
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0)
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt.
			 * The list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Also add the CPU chosen previously,
				 * since it has the same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a single CPU which is best suited to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bits in the cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at a low PIL then we may get
	 * preempted before we raise the PIL here. That should be okay
	 * because we are only going to poke CPUs now; at most another
	 * thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}

/*
 * Call function func with argument arg
 * at some later time at software interrupt priority
 */
void
softcall(void (*func)(void *), void *arg)
{
	softcall_t *sc;
	clock_t w;

	/*
	 * protect against cross-calls
	 */
	mutex_enter(&softcall_lock);
	/* coalesce identical softcalls */
	for (sc = softhead; sc != 0; sc = sc->sc_next) {
		if (sc->sc_func == func && sc->sc_arg == arg) {
			goto intr;
		}
	}

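	/* Grab a free entry and append it to the softcall queue. */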
	if ((sc = softfree) == 0)
		panic("too many softcalls");

	softfree = sc->sc_next;
	sc->sc_func = func;
	sc->sc_arg = arg;
	sc->sc_next = 0;

	if (softhead) {
		softtail->sc_next = sc;
		softtail = sc;
	} else
		softhead = softtail = sc;

intr:
	if (softcall_state & SOFT_IDLE) {
		softcall_state = SOFT_PEND;
		softcall_tick = lbolt;
		mutex_exit(&softcall_lock);
		siron();
	} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
		w = lbolt - softcall_tick;
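		/*
		 * If the handler has made progress within softcall_delay
		 * ticks, or there is no other CPU to drain the queue,
		 * there is nothing more to do here.
		 */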
		if (w <= softcall_delay || ncpus == 1) {
			mutex_exit(&softcall_lock);
			return;
		}

		if (!(softcall_state & SOFT_STEAL)) {
			softcall_state |= SOFT_STEAL;

			/*
			 * Give the current handler some more time
			 * before fishing around again.
			 */
			softcall_tick = lbolt;
		}

		/* softcall_lock will be released by this routine */
		(void) softcall_choose_cpu();
	}
}

void
kdi_softcall(void (*func)(void))
{
	kdi_softcall_func = func;

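	/*
	 * If the softcall queue is empty no soft interrupt is pending,
	 * so raise one to get the kdi callback run.
	 */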
	if (softhead == NULL)
		siron();
}

/*
 * Called to process software interrupts: take one off the queue, call it,
 * repeat.
 *
 * Note that the queue may change during the call; softcall_lock and the
 * state variables softcall_state and softcall_latest_cpuid ensure that
 * - we don't have multiple CPUs pulling from the list (which would violate
 *   the FIFO order, except when a handler is stuck).
 * - we don't miss a new entry having been added to the head.
 * - we don't miss a wakeup.
 */

void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	mutex_enter(&softcall_lock);

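	/*
	 * Claim the queue for draining only if work is pending or we
	 * were asked to steal it; otherwise clear our poke bit and leave.
	 */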
	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		softcall_state = SOFT_DRAIN;
	} else {
		/*
		 * The check for softcall_cpuset being NULL is required
		 * because softint() may get called very early during boot,
		 * before softcall_init() has run.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to the current CPU ensures
	 * that there is only one active softlevel1 handler processing
	 * the softcall queue.
	 *
	 * Since softcall_lock is dropped before calling func (the
	 * callback), we need softcall_latest_cpuid to prevent two
	 * softlevel1 handlers from working on the queue when the first
	 * softlevel1 handler gets stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourselves to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

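	/*
	 * Drain the queue in FIFO order, dropping softcall_lock around
	 * each callback.
	 */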
	for (;;) {
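		/*
		 * Record that this handler is making progress so that
		 * softcall() doesn't try to steal the queue from us.
		 */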
		softcall_tick = lbolt;
		if ((sc = softhead) != NULL) {
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * The current interrupt handler no longer needs to do
		 * softcall processing because either
		 *  (a) softcall is in the SOFT_IDLE state, or
		 *  (b) another CPU is already draining the softcall
		 *	queue and the current softlevel1 handler is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}