xref: /titanic_50/usr/src/uts/i86pc/io/cbe.c (revision 67318e4a54c292d543e6b077199ce492b3d3a049)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/cyclic.h>
#include <sys/cyclic_impl.h>
#include <sys/spl.h>
#include <sys/x_call.h>
#include <sys/kmem.h>
#include <sys/machsystm.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_types.h>
#include <sys/psm.h>
#include <sys/atomic.h>
#include <sys/clock.h>
#include <sys/x86_archext.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_intr.h>
#include <sys/avintr.h>

static int cbe_vector;
static int cbe_ticks = 0;

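/*
 * Cross-call handshake state: cbe_xcall() posts the function, argument,
 * and destination CPU here; cbe_fire() on the destination consumes the
 * request and clears the pointers to signal completion.
 */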
static cyc_func_t volatile cbe_xcall_func;
static cpu_t *volatile cbe_xcall_cpu;
static void *cbe_xcall_farg;
static cpuset_t cbe_enabled;

static ddi_softint_hdl_impl_t cbe_low_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
static ddi_softint_hdl_impl_t cbe_clock_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};

cyclic_id_t cbe_hres_cyclic;
int cbe_psm_timer_mode = TIMER_ONESHOT;
static hrtime_t cbe_timer_resolution;

extern int tsc_gethrtime_enable;

void cbe_hres_tick(void);

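/*
 * Soft interrupt handler for CBE_LOCK_PIL; runs lock-level cyclic soft
 * interrupt processing on the current CPU.
 */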
int
cbe_softclock(void)
{
	cyclic_softint(CPU, CY_LOCK_LEVEL);
	return (1);
}

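/*
 * Soft interrupt handler for CBE_LOW_PIL; runs low-level cyclic soft
 * interrupt processing on the current CPU.
 */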
int
cbe_low_level(void)
{
	cpu_t *cpu = CPU;

	cyclic_softint(cpu, CY_LOW_LEVEL);
	return (1);
}

/*
 * We can be in cbe_fire() either due to a cyclic-induced cross call, or due
 * to the timer firing at level-14.  Because cyclic_fire() can tolerate
 * spurious calls, it would not matter if we called cyclic_fire() in both
 * cases.
 */
int
cbe_fire(void)
{
	cpu_t *cpu = CPU;
	processorid_t me = cpu->cpu_id, i;
	int cross_call = (cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);

	cyclic_fire(cpu);

	if (cbe_psm_timer_mode != TIMER_ONESHOT && me == 0 && !cross_call) {
		for (i = 1; i < NCPU; i++) {
			if (CPU_IN_SET(cbe_enabled, i)) {
				XC_TRACE(TT_XC_CBE_FIRE, -1, i);
				send_dirint(i, CBE_HIGH_PIL);
			}
		}
	}

	if (cross_call) {
		ASSERT(cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);
		(*cbe_xcall_func)(cbe_xcall_farg);
		cbe_xcall_func = NULL;
		cbe_xcall_cpu = NULL;
	}

	return (1);
}

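/*
 * Post a soft interrupt on the current CPU at the PIL that corresponds
 * to the requested cyclic soft level (low or lock).
 */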
/*ARGSUSED*/
void
cbe_softint(void *arg, cyc_level_t level)
{
	switch (level) {
	case CY_LOW_LEVEL:
		(*setsoftint)(CBE_LOW_PIL, cbe_low_hdl.ih_pending);
		break;
	case CY_LOCK_LEVEL:
		(*setsoftint)(CBE_LOCK_PIL, cbe_clock_hdl.ih_pending);
		break;
	default:
		panic("cbe_softint: unexpected soft level %d", level);
	}
}

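/*
 * Reprogram the underlying PSM timer to fire at the given absolute time;
 * this is a no-op unless the timer is in one-shot mode.
 */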
/*ARGSUSED*/
void
cbe_reprogram(void *arg, hrtime_t time)
{
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_reprogram)(time);
}

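/*
 * Raise the interrupt priority to the PIL that corresponds to the given
 * cyclic level and return the previous priority as a cookie.
 */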
/*ARGSUSED*/
cyc_cookie_t
cbe_set_level(void *arg, cyc_level_t level)
{
	int ipl;

	switch (level) {
	case CY_LOW_LEVEL:
		ipl = CBE_LOW_PIL;
		break;
	case CY_LOCK_LEVEL:
		ipl = CBE_LOCK_PIL;
		break;
	case CY_HIGH_LEVEL:
		ipl = CBE_HIGH_PIL;
		break;
	default:
		panic("cbe_set_level: unexpected level %d", level);
	}

	return (splr(ipltospl(ipl)));
}

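/*
 * Restore the interrupt priority saved by cbe_set_level().
 */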
/*ARGSUSED*/
void
cbe_restore_level(void *arg, cyc_cookie_t cookie)
{
	splx(cookie);
}

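/*
 * Run func(farg) on the destination CPU at CBE_HIGH_PIL.  If the
 * destination is the current CPU, the function is called directly;
 * otherwise the request is posted in the cbe_xcall_* variables, a
 * directed interrupt is sent, and we spin until cbe_fire() on the
 * destination clears the handshake.
 */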
/*ARGSUSED*/
void
cbe_xcall(void *arg, cpu_t *dest, cyc_func_t func, void *farg)
{
	kpreempt_disable();

	if (dest == CPU) {
		(*func)(farg);
		kpreempt_enable();
		return;
	}

	ASSERT(cbe_xcall_func == NULL);

	cbe_xcall_farg = farg;
	membar_producer();
	cbe_xcall_cpu = dest;
	cbe_xcall_func = func;

	XC_TRACE(TT_XC_CBE_XCALL, -1, dest->cpu_id);
	send_dirint(dest->cpu_id, CBE_HIGH_PIL);

	while (cbe_xcall_func != NULL || cbe_xcall_cpu != NULL)
		continue;

	kpreempt_enable();

	ASSERT(cbe_xcall_func == NULL && cbe_xcall_cpu == NULL);
}

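/*
 * Per-CPU backend configuration: the backend argument for a CPU is
 * simply its cpu_t pointer.
 */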
void *
cbe_configure(cpu_t *cpu)
{
	return (cpu);
}

#ifndef __xpv
/*
 * declarations needed for time adjustment
 */
extern void	tsc_suspend(void);
extern void	tsc_resume(void);
/*
 * Call the resume function in the cyclic, instead of inline in the
 * resume path.
 */
extern int	tsc_resume_in_cyclic;
#endif

/*ARGSUSED*/
static void
cbe_suspend(cyb_arg_t arg)
{
#ifndef __xpv
	/*
	 * This is an x86 backend, so let the tsc_suspend
	 * that is specific to x86 platforms do the work.
	 */
	tsc_suspend();
#endif
}

/*ARGSUSED*/
static void
cbe_resume(cyb_arg_t arg)
{
#ifndef __xpv
	if (tsc_resume_in_cyclic) {
		tsc_resume();
	}
#endif
}

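/*
 * Enable the backend on the given CPU: record it in cbe_enabled and, in
 * one-shot mode, turn on the local timer.  CPU 0 is left alone in
 * periodic mode, since its timer is never disabled.
 */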
void
cbe_enable(void *arg)
{
	processorid_t me = ((cpu_t *)arg)->cpu_id;

	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
		return;

	/*
	 * Added (me == 0) to the ASSERT because the timer isn't
	 * disabled on CPU 0, and cbe_enable is called when we resume.
	 */
	ASSERT((me == 0) || !CPU_IN_SET(cbe_enabled, me));
	CPUSET_ADD(cbe_enabled, me);
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_enable)();
}

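/*
 * Disable the backend on the given CPU: remove it from cbe_enabled and,
 * in one-shot mode, turn off the local timer.
 */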
void
cbe_disable(void *arg)
{
	processorid_t me = ((cpu_t *)arg)->cpu_id;

	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
		return;

	ASSERT(CPU_IN_SET(cbe_enabled, me));
	CPUSET_DEL(cbe_enabled, me);
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_disable)();
}

/*
 * Unbound cyclic, called once per tick (every nsec_per_tick ns).
 */
void
cbe_hres_tick(void)
{
	int s;

	dtrace_hres_tick();

	/*
	 * Because hres_tick effectively locks hres_lock, we must be at the
	 * same PIL as that used for CLOCK_LOCK.
	 */
	s = splr(ipltospl(XC_HI_PIL));
	hres_tick();
	splx(s);

	if ((cbe_ticks % hz) == 0)
		(*hrtime_tick)();

	cbe_ticks++;
}

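/*
 * Early backend initialization: latch the clock interrupt vector, clear
 * the set of enabled CPUs, and initialize the PSM timer, recording its
 * resolution and (one-shot or periodic) mode.
 */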
void
cbe_init_pre(void)
{
	cbe_vector = (*psm_get_clockirq)(CBE_HIGH_PIL);

	CPUSET_ZERO(cbe_enabled);

	cbe_timer_resolution = (*clkinitf)(TIMER_ONESHOT, &cbe_psm_timer_mode);
}

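/*
 * Register this backend with the cyclic subsystem, install the high-level
 * (clock and IPI) and soft-level interrupt handlers, and add the unbound
 * hres tick cyclic.
 */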
void
cbe_init(void)
{
	cyc_backend_t cbe = {
		cbe_configure,		/* cyb_configure */
		NULL,			/* cyb_unconfigure */
		cbe_enable,		/* cyb_enable */
		cbe_disable,		/* cyb_disable */
		cbe_reprogram,		/* cyb_reprogram */
		cbe_softint,		/* cyb_softint */
		cbe_set_level,		/* cyb_set_level */
		cbe_restore_level,	/* cyb_restore_level */
		cbe_xcall,		/* cyb_xcall */
		cbe_suspend,		/* cyb_suspend */
		cbe_resume		/* cyb_resume */
	};
	cyc_handler_t hdlr;
	cyc_time_t when;

	mutex_enter(&cpu_lock);
	cyclic_init(&cbe, cbe_timer_resolution);
	mutex_exit(&cpu_lock);

	(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
	    "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);

	if (psm_get_ipivect != NULL) {
		(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
		    "cbe_fire_slave",
		    (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
		    0, NULL, NULL, NULL);
	}

	(void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
	    (avfunc)cbe_softclock, "softclock", NULL, NULL);

	(void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
	    (avfunc)cbe_low_level, "low level", NULL, NULL);

	mutex_enter(&cpu_lock);

	hdlr.cyh_level = CY_HIGH_LEVEL;
	hdlr.cyh_func = (cyc_func_t)cbe_hres_tick;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	cbe_hres_cyclic = cyclic_add(&hdlr, &when);

	if (psm_post_cyclic_setup != NULL)
		(*psm_post_cyclic_setup)(NULL);

	mutex_exit(&cpu_lock);
}
369