/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/cyclic.h>
#include <sys/cyclic_impl.h>
#include <sys/spl.h>
#include <sys/x_call.h>
#include <sys/kmem.h>
#include <sys/machsystm.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_types.h>
#include <sys/psm.h>
#include <sys/atomic.h>
#include <sys/clock.h>
#include <sys/x86_archext.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_intr.h>
#include <sys/avintr.h>

static int cbe_vector;
static int cbe_ticks = 0;

/*
 * At most one cyclic cross call is outstanding at a time: the target CPU,
 * the function to run there, and its argument.
 */
static cyc_func_t volatile cbe_xcall_func;
static cpu_t *volatile cbe_xcall_cpu;
static void *cbe_xcall_farg;

/* CPUs on which this backend is currently enabled. */
static cpuset_t cbe_enabled;

static ddi_softint_hdl_impl_t cbe_low_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
static ddi_softint_hdl_impl_t cbe_clock_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};

cyclic_id_t cbe_hres_cyclic;
int cbe_psm_timer_mode = TIMER_ONESHOT;

extern int tsc_gethrtime_enable;

void cbe_hres_tick(void);

/*
 * Soft interrupt handler at CBE_LOCK_PIL; drains pending CY_LOCK_LEVEL
 * cyclic work on the current CPU.
 */
int
cbe_softclock(void)
{
	cyclic_softint(CPU, CY_LOCK_LEVEL);
	return (1);
}

/*
 * Soft interrupt handler at CBE_LOW_PIL; drains pending CY_LOW_LEVEL
 * cyclic work on the current CPU.
 */
int
cbe_low_level(void)
{
	cpu_t *cpu = CPU;

	cyclic_softint(cpu, CY_LOW_LEVEL);
	return (1);
}

/*
 * We can be in cbe_fire() either due to a cyclic-induced cross call, or due
 * to the timer firing at level-14.  Because cyclic_fire() can tolerate
 * spurious calls, it would not matter if we called cyclic_fire() in both
 * cases.
 */
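/*
 * If the PSM runs the timer in periodic mode, CPU 0 is the only CPU
 * expected to take the hardware clock interrupt (cbe_enable() and
 * cbe_disable() deliberately skip CPU 0 in that mode), so CPU 0 fans the
 * tick out to every other CPU in cbe_enabled with a CBE_HIGH_PIL IPI.
 * The !cross_call test keeps a cross-call-induced entry from
 * re-broadcasting.
 */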
int
cbe_fire(void)
{
	cpu_t *cpu = CPU;
	processorid_t me = cpu->cpu_id, i;
	int cross_call = (cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);

	cyclic_fire(cpu);

	if (cbe_psm_timer_mode != TIMER_ONESHOT && me == 0 && !cross_call) {
		for (i = 1; i < NCPU; i++) {
			if (CPU_IN_SET(cbe_enabled, i)) {
				XC_TRACE(TT_XC_CBE_FIRE, -1, i);
				send_dirint(i, CBE_HIGH_PIL);
			}
		}
	}

	if (cross_call) {
		ASSERT(cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);
		(*cbe_xcall_func)(cbe_xcall_farg);
		cbe_xcall_func = NULL;
		cbe_xcall_cpu = NULL;
	}

	return (1);
}

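/*
 * cyb_softint: post the soft interrupt registered with add_avsoftintr()
 * in cbe_init() for the requested level; the pending work is then drained
 * by cbe_softclock() or cbe_low_level() at CBE_LOCK_PIL or CBE_LOW_PIL.
 */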
/*ARGSUSED*/
void
cbe_softint(void *arg, cyc_level_t level)
{
	switch (level) {
	case CY_LOW_LEVEL:
		(*setsoftint)(CBE_LOW_PIL, cbe_low_hdl.ih_pending);
		break;
	case CY_LOCK_LEVEL:
		(*setsoftint)(CBE_LOCK_PIL, cbe_clock_hdl.ih_pending);
		break;
	default:
		panic("cbe_softint: unexpected soft level %d", level);
	}
}

/*
 * cyb_reprogram: in one-shot mode, program the PSM timer to expire at the
 * given absolute time; in periodic mode the timer fires on its own.
 */
/*ARGSUSED*/
void
cbe_reprogram(void *arg, hrtime_t time)
{
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_reprogram)(time);
}

/*
 * cyb_set_level: raise the PIL to the one backing the requested cyclic
 * level and return the previous priority as a cookie.
 */
/*ARGSUSED*/
cyc_cookie_t
cbe_set_level(void *arg, cyc_level_t level)
{
	int ipl;

	switch (level) {
	case CY_LOW_LEVEL:
		ipl = CBE_LOW_PIL;
		break;
	case CY_LOCK_LEVEL:
		ipl = CBE_LOCK_PIL;
		break;
	case CY_HIGH_LEVEL:
		ipl = CBE_HIGH_PIL;
		break;
	default:
		panic("cbe_set_level: unexpected level %d", level);
	}

	return (splr(ipltospl(ipl)));
}

/*
 * cyb_restore_level: return to the interrupt priority saved by
 * cbe_set_level().
 */
/*ARGSUSED*/
void
cbe_restore_level(void *arg, cyc_cookie_t cookie)
{
	splx(cookie);
}

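/*
 * cyb_xcall: run func(farg) on CPU 'dest' (directly, if 'dest' is the
 * current CPU).  A rough sketch of the single-slot handshake with the
 * cross-call path in cbe_fire():
 *
 *	sender (cbe_xcall)			target (cbe_fire)
 *	cbe_xcall_farg = farg;
 *	membar_producer();
 *	cbe_xcall_cpu = dest;
 *	cbe_xcall_func = func;
 *	send_dirint(dest, CBE_HIGH_PIL);
 *						sees func != NULL, cpu == CPU
 *						(*cbe_xcall_func)(cbe_xcall_farg);
 *						cbe_xcall_func = NULL;
 *	spin while func or cpu non-NULL		cbe_xcall_cpu = NULL;
 *
 * The sender's spin, together with the ASSERT below, ensures that at most
 * one cross call is outstanding at any time.
 */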
/*ARGSUSED*/
void
cbe_xcall(void *arg, cpu_t *dest, cyc_func_t func, void *farg)
{
	kpreempt_disable();

	if (dest == CPU) {
		(*func)(farg);
		kpreempt_enable();
		return;
	}

	ASSERT(cbe_xcall_func == NULL);

	cbe_xcall_farg = farg;
	membar_producer();
	cbe_xcall_cpu = dest;
	cbe_xcall_func = func;

	XC_TRACE(TT_XC_CBE_XCALL, -1, dest->cpu_id);
	send_dirint(dest->cpu_id, CBE_HIGH_PIL);

	while (cbe_xcall_func != NULL || cbe_xcall_cpu != NULL)
		continue;

	kpreempt_enable();

	ASSERT(cbe_xcall_func == NULL && cbe_xcall_cpu == NULL);
}

/*
 * cyb_configure: the per-CPU backend argument is simply the cpu_t itself.
 */
void *
cbe_configure(cpu_t *cpu)
{
	return (cpu);
}

#ifndef __xpv
/*
 * Declarations needed for time adjustment.
 */
extern void	tsc_suspend(void);
extern void	tsc_resume(void);
/*
 * Call the resume function in the cyclic, instead of inline in the
 * resume path.
 */
extern int	tsc_resume_in_cyclic;
#endif

/*ARGSUSED*/
static void
cbe_suspend(cyb_arg_t arg)
{
#ifndef __xpv
	/*
	 * This is an x86 backend, so let the tsc_suspend
	 * that is specific to x86 platforms do the work.
	 */
	tsc_suspend();
#endif
}

/*ARGSUSED*/
static void
cbe_resume(cyb_arg_t arg)
{
#ifndef __xpv
	if (tsc_resume_in_cyclic) {
		tsc_resume();
	}
#endif
}

void
cbe_enable(void *arg)
{
	processorid_t me = ((cpu_t *)arg)->cpu_id;

	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
		return;

	/*
	 * Added (me == 0) to the ASSERT because the timer isn't
	 * disabled on CPU 0, and cbe_enable is called when we resume.
	 */
	ASSERT((me == 0) || !CPU_IN_SET(cbe_enabled, me));
	CPUSET_ADD(cbe_enabled, me);
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_enable)();
}

void
cbe_disable(void *arg)
{
	processorid_t me = ((cpu_t *)arg)->cpu_id;

	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
		return;

	ASSERT(CPU_IN_SET(cbe_enabled, me));
	CPUSET_DEL(cbe_enabled, me);
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_disable)();
}

/*
 * Unbound cyclic, called once per tick (every nsec_per_tick ns).
 */
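/*
 * For scale: nsec_per_tick is NANOSEC / hz, so with the default hz of 100
 * this cyclic fires every 10ms, and the (cbe_ticks % hz) test below ends
 * up invoking (*hrtime_tick)() roughly once per second.
 */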
void
cbe_hres_tick(void)
{
	int s;

	dtrace_hres_tick();

	/*
	 * Because hres_tick effectively locks hres_lock, we must be at the
	 * same PIL as that used for CLOCK_LOCK.
	 */
	s = splr(ipltospl(XC_HI_PIL));
	hres_tick();
	splx(s);

	if ((cbe_ticks % hz) == 0)
		(*hrtime_tick)();

	cbe_ticks++;
}

void
cbe_init(void)
{
	cyc_backend_t cbe = {
		cbe_configure,		/* cyb_configure */
		NULL,			/* cyb_unconfigure */
		cbe_enable,		/* cyb_enable */
		cbe_disable,		/* cyb_disable */
		cbe_reprogram,		/* cyb_reprogram */
		cbe_softint,		/* cyb_softint */
		cbe_set_level,		/* cyb_set_level */
		cbe_restore_level,	/* cyb_restore_level */
		cbe_xcall,		/* cyb_xcall */
		cbe_suspend,		/* cyb_suspend */
		cbe_resume		/* cyb_resume */
	};
	hrtime_t resolution;
	cyc_handler_t hdlr;
	cyc_time_t when;

	cbe_vector = (*psm_get_clockirq)(CBE_HIGH_PIL);

	CPUSET_ZERO(cbe_enabled);

	resolution = (*clkinitf)(TIMER_ONESHOT, &cbe_psm_timer_mode);

	mutex_enter(&cpu_lock);
	cyclic_init(&cbe, resolution);
	mutex_exit(&cpu_lock);

	(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
	    "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);

	if (psm_get_ipivect != NULL) {
		(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
		    "cbe_fire_slave",
		    (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
		    0, NULL, NULL, NULL);
	}

	(void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
	    (avfunc)cbe_softclock, "softclock", NULL, NULL);

	(void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
	    (avfunc)cbe_low_level, "low level", NULL, NULL);

	mutex_enter(&cpu_lock);

	hdlr.cyh_level = CY_HIGH_LEVEL;
	hdlr.cyh_func = (cyc_func_t)cbe_hres_tick;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	cbe_hres_cyclic = cyclic_add(&hdlr, &when);

	if (psm_post_cyclic_setup != NULL)
		(*psm_post_cyclic_setup)(NULL);

	mutex_exit(&cpu_lock);
}
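
/*
 * For reference, a minimal sketch of how a cyclic client uses the
 * subsystem this backend drives, mirroring the hres cyclic registration
 * above; the handler name and interval here are purely illustrative:
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *	cyclic_id_t id;
 *
 *	hdlr.cyh_func = (cyc_func_t)my_once_a_second;	(hypothetical)
 *	hdlr.cyh_arg = NULL;
 *	hdlr.cyh_level = CY_LOCK_LEVEL;
 *
 *	when.cyt_when = 0;
 *	when.cyt_interval = NANOSEC;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 */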