xref: /titanic_41/usr/src/uts/i86pc/io/cbe.c (revision 5bbb4db2c3f208d12bf0fd11769728f9e5ba66a2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/systm.h>
28 #include <sys/cyclic.h>
29 #include <sys/cyclic_impl.h>
30 #include <sys/spl.h>
31 #include <sys/x_call.h>
32 #include <sys/kmem.h>
33 #include <sys/machsystm.h>
34 #include <sys/smp_impldefs.h>
35 #include <sys/psm_types.h>
36 #include <sys/psm.h>
37 #include <sys/atomic.h>
38 #include <sys/clock.h>
39 #include <sys/x86_archext.h>
40 #include <sys/ddi_impldefs.h>
41 #include <sys/ddi_intr.h>
42 #include <sys/avintr.h>
43 
/* Interrupt vector used for the CBE_HIGH_PIL clock interrupt (see cbe_init_pre). */
static int cbe_vector;
/* Count of ticks taken by cbe_hres_tick(); used to detect one-second boundaries. */
static int cbe_ticks = 0;

/*
 * cbe_xcall_lock is used to protect the xcall globals since the cyclic
 * reprogramming API does not use cpu_lock.
 */
static kmutex_t cbe_xcall_lock;
/* Function/arg/target for an in-flight cross call; volatile: spun on in cbe_xcall(). */
static cyc_func_t volatile cbe_xcall_func;
static cpu_t *volatile cbe_xcall_cpu;
static void *cbe_xcall_farg;
/* Set of CPUs whose backend timer has been enabled via cbe_enable(). */
static cpuset_t cbe_enabled;

/* Soft interrupt handles for the low-level and lock-level soft interrupts. */
static ddi_softint_hdl_impl_t cbe_low_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
static ddi_softint_hdl_impl_t cbe_clock_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};

/* Cyclic driving cbe_hres_tick() once per tick; installed in cbe_init(). */
cyclic_id_t cbe_hres_cyclic;
/* Timer mode granted by the platform; set by (*clkinitf)() in cbe_init_pre(). */
int cbe_psm_timer_mode = TIMER_ONESHOT;
/* Resolution reported by the platform timer; passed to cyclic_init(). */
static hrtime_t cbe_timer_resolution;

extern int tsc_gethrtime_enable;

void cbe_hres_tick(void);
69 
70 int
71 cbe_softclock(void)
72 {
73 	cyclic_softint(CPU, CY_LOCK_LEVEL);
74 	return (1);
75 }
76 
77 int
78 cbe_low_level(void)
79 {
80 	cpu_t *cpu = CPU;
81 
82 	cyclic_softint(cpu, CY_LOW_LEVEL);
83 	return (1);
84 }
85 
86 /*
87  * We can be in cbe_fire() either due to a cyclic-induced cross call, or due
88  * to the timer firing at level-14.  Because cyclic_fire() can tolerate
89  * spurious calls, it would not matter if we called cyclic_fire() in both
90  * cases.
91  */
int
cbe_fire(void)
{
	cpu_t *cpu = CPU;
	processorid_t me = cpu->cpu_id, i;
	/* True iff this interrupt was the cross call cbe_xcall() aimed at us. */
	int cross_call = (cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);

	cyclic_fire(cpu);

	/*
	 * In periodic mode only CPU 0's timer actually fires; fan the tick
	 * out to every other enabled CPU via a directed interrupt.  Skip
	 * the fan-out when this entry was itself a cross call.
	 */
	if (cbe_psm_timer_mode != TIMER_ONESHOT && me == 0 && !cross_call) {
		for (i = 1; i < NCPU; i++) {
			if (CPU_IN_SET(cbe_enabled, i)) {
				send_dirint(i, CBE_HIGH_PIL);
			}
		}
	}

	if (cross_call) {
		ASSERT(cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);
		(*cbe_xcall_func)(cbe_xcall_farg);
		/*
		 * Clearing these volatile globals releases the caller in
		 * cbe_xcall(), which spins until both are NULL.
		 */
		cbe_xcall_func = NULL;
		cbe_xcall_cpu = NULL;
	}

	return (1);
}
118 
119 /*ARGSUSED*/
120 void
121 cbe_softint(void *arg, cyc_level_t level)
122 {
123 	switch (level) {
124 	case CY_LOW_LEVEL:
125 		(*setsoftint)(CBE_LOW_PIL, cbe_low_hdl.ih_pending);
126 		break;
127 	case CY_LOCK_LEVEL:
128 		(*setsoftint)(CBE_LOCK_PIL, cbe_clock_hdl.ih_pending);
129 		break;
130 	default:
131 		panic("cbe_softint: unexpected soft level %d", level);
132 	}
133 }
134 
135 /*ARGSUSED*/
136 void
137 cbe_reprogram(void *arg, hrtime_t time)
138 {
139 	if (cbe_psm_timer_mode == TIMER_ONESHOT)
140 		(*psm_timer_reprogram)(time);
141 }
142 
143 /*ARGSUSED*/
144 cyc_cookie_t
145 cbe_set_level(void *arg, cyc_level_t level)
146 {
147 	int ipl;
148 
149 	switch (level) {
150 	case CY_LOW_LEVEL:
151 		ipl = CBE_LOW_PIL;
152 		break;
153 	case CY_LOCK_LEVEL:
154 		ipl = CBE_LOCK_PIL;
155 		break;
156 	case CY_HIGH_LEVEL:
157 		ipl = CBE_HIGH_PIL;
158 		break;
159 	default:
160 		panic("cbe_set_level: unexpected level %d", level);
161 	}
162 
163 	return (splr(ipltospl(ipl)));
164 }
165 
166 /*ARGSUSED*/
167 void
168 cbe_restore_level(void *arg, cyc_cookie_t cookie)
169 {
170 	splx(cookie);
171 }
172 
/*ARGSUSED*/
void
cbe_xcall(void *arg, cpu_t *dest, cyc_func_t func, void *farg)
{
	kpreempt_disable();

	/* If the target is this CPU, just call the function directly. */
	if (dest == CPU) {
		(*func)(farg);
		kpreempt_enable();
		return;
	}

	/* Serialize access to the cbe_xcall_* globals (see comment above). */
	mutex_enter(&cbe_xcall_lock);

	ASSERT(cbe_xcall_func == NULL);

	/*
	 * Publish the argument before the function/cpu pointers; the
	 * membar ensures cbe_fire() on the target cannot observe a
	 * non-NULL cbe_xcall_func with a stale cbe_xcall_farg.
	 */
	cbe_xcall_farg = farg;
	membar_producer();
	cbe_xcall_cpu = dest;
	cbe_xcall_func = func;

	/* Interrupt the target; it will run func in cbe_fire(). */
	send_dirint(dest->cpu_id, CBE_HIGH_PIL);

	/*
	 * Spin until cbe_fire() on the target clears both volatile
	 * globals, indicating the function has completed.
	 */
	while (cbe_xcall_func != NULL || cbe_xcall_cpu != NULL)
		continue;

	mutex_exit(&cbe_xcall_lock);

	kpreempt_enable();
}
203 
204 void *
205 cbe_configure(cpu_t *cpu)
206 {
207 	return (cpu);
208 }
209 
210 #ifndef __xpv
211 /*
212  * declarations needed for time adjustment
213  */
214 extern void	tsc_suspend(void);
215 extern void	tsc_resume(void);
216 /*
217  * Call the resume function in the cyclic, instead of inline in the
218  * resume path.
219  */
220 extern int	tsc_resume_in_cyclic;
221 #endif
222 
223 /*ARGSUSED*/
224 static void
225 cbe_suspend(cyb_arg_t arg)
226 {
227 #ifndef __xpv
228 	/*
229 	 * This is an x86 backend, so let the tsc_suspend
230 	 * that is specific to x86 platforms do the work.
231 	 */
232 	tsc_suspend();
233 #endif
234 }
235 
236 /*ARGSUSED*/
237 static void
238 cbe_resume(cyb_arg_t arg)
239 {
240 #ifndef __xpv
241 	if (tsc_resume_in_cyclic) {
242 		tsc_resume();
243 	}
244 #endif
245 }
246 
247 void
248 cbe_enable(void *arg)
249 {
250 	processorid_t me = ((cpu_t *)arg)->cpu_id;
251 
252 	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
253 	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
254 		return;
255 
256 	/*
257 	 * Added (me == 0) to the ASSERT because the timer isn't
258 	 * disabled on CPU 0, and cbe_enable is called when we resume.
259 	 */
260 	ASSERT((me == 0) || !CPU_IN_SET(cbe_enabled, me));
261 	CPUSET_ADD(cbe_enabled, me);
262 	if (cbe_psm_timer_mode == TIMER_ONESHOT)
263 		(*psm_timer_enable)();
264 }
265 
266 void
267 cbe_disable(void *arg)
268 {
269 	processorid_t me = ((cpu_t *)arg)->cpu_id;
270 
271 	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
272 	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
273 		return;
274 
275 	ASSERT(CPU_IN_SET(cbe_enabled, me));
276 	CPUSET_DEL(cbe_enabled, me);
277 	if (cbe_psm_timer_mode == TIMER_ONESHOT)
278 		(*psm_timer_disable)();
279 }
280 
281 /*
282  * Unbound cyclic, called once per tick (every nsec_per_tick ns).
283  */
284 void
285 cbe_hres_tick(void)
286 {
287 	int s;
288 
289 	dtrace_hres_tick();
290 
291 	/*
292 	 * Because hres_tick effectively locks hres_lock, we must be at the
293 	 * same PIL as that used for CLOCK_LOCK.
294 	 */
295 	s = splr(ipltospl(XC_HI_PIL));
296 	hres_tick();
297 	splx(s);
298 
299 	if ((cbe_ticks % hz) == 0)
300 		(*hrtime_tick)();
301 
302 	cbe_ticks++;
303 
304 }
305 
306 void
307 cbe_init_pre(void)
308 {
309 	cbe_vector = (*psm_get_clockirq)(CBE_HIGH_PIL);
310 
311 	CPUSET_ZERO(cbe_enabled);
312 
313 	cbe_timer_resolution = (*clkinitf)(TIMER_ONESHOT, &cbe_psm_timer_mode);
314 }
315 
/*
 * Main backend initialization: register this backend with the cyclic
 * subsystem, wire up the hard and soft interrupt handlers, and install
 * the unbound cyclic that drives cbe_hres_tick().
 */
void
cbe_init(void)
{
	/* Backend ops vector handed to cyclic_init(). */
	cyc_backend_t cbe = {
		cbe_configure,		/* cyb_configure */
		NULL,			/* cyb_unconfigure */
		cbe_enable,		/* cyb_enable */
		cbe_disable,		/* cyb_disable */
		cbe_reprogram,		/* cyb_reprogram */
		cbe_softint,		/* cyb_softint */
		cbe_set_level,		/* cyb_set_level */
		cbe_restore_level,	/* cyb_restore_level */
		cbe_xcall,		/* cyb_xcall */
		cbe_suspend,		/* cyb_suspend */
		cbe_resume		/* cyb_resume */
	};
	cyc_handler_t hdlr;
	cyc_time_t when;

	mutex_init(&cbe_xcall_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&cpu_lock);
	cyclic_init(&cbe, cbe_timer_resolution);
	mutex_exit(&cpu_lock);

	/* cbe_fire handles the timer interrupt on the clock vector... */
	(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
	    "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);

	/* ...and also the high-priority IPI used for cross calls, if any. */
	if (psm_get_ipivect != NULL) {
		(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
		    "cbe_fire_slave",
		    (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
		    0, NULL, NULL, NULL);
	}

	/* Soft interrupts posted by cbe_softint() at each soft level. */
	(void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
	    (avfunc)cbe_softclock, "softclock", NULL, NULL);

	(void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
	    (avfunc)cbe_low_level, "low level", NULL, NULL);

	mutex_enter(&cpu_lock);

	/* High-level cyclic that fires every tick, starting immediately. */
	hdlr.cyh_level = CY_HIGH_LEVEL;
	hdlr.cyh_func = (cyc_func_t)cbe_hres_tick;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	cbe_hres_cyclic = cyclic_add(&hdlr, &when);

	/* Give the platform a chance to finish its own cyclic setup. */
	if (psm_post_cyclic_setup != NULL)
		(*psm_post_cyclic_setup)(NULL);

	mutex_exit(&cpu_lock);
}
373