/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Welcome to the world of the "real mode platter".
 * See also startup.c, mpcore.s and apic.c for related routines.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/mach_mmu.h>
#include <sys/promif.h>
#include <sys/cpu.h>
#include <sys/sdt.h>
#include <vm/hat_i86.h>

extern void real_mode_start(void);
extern void real_mode_end(void);
extern void *(*cpu_pause_func)(void *);

void rmp_gdt_init(rm_platter_t *);

/*
 * Fill up the real mode platter to make it easy for the real mode code to
 * kick things off. This area should really be one passed by boot to the
 * kernel, guaranteed to be below 1MB and aligned to 16 bytes. It should
 * also have identical physical and virtual addresses in paged mode.
 */
static ushort_t *warm_reset_vector = NULL;

int
mach_cpucontext_init(void)
{
	ushort_t *vec;

	if (!(vec = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
	    sizeof (vec), PROT_READ | PROT_WRITE)))
		return (-1);
	/*
	 * setup secondary cpu bios boot up vector
	 */
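	/*
	 * The warm reset vector is a real mode far pointer stored as
	 * offset followed by segment.  Point it at the copy of
	 * real_mode_start placed at rm_code in the platter: the segment
	 * is the platter's real mode paragraph (rm_platter_pa >> 4) and
	 * the offset is rm_code's displacement within that segment.
	 */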
	*vec = (ushort_t)((caddr_t)
	    ((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va
	    + ((ulong_t)rm_platter_va & 0xf));
	vec[1] = (ushort_t)(rm_platter_pa >> 4);
	warm_reset_vector = vec;

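	/*
	 * Copy the real mode trampoline text into the platter page below
	 * 1MB so a starting cpu can execute it from real mode.
	 */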
	bcopy((caddr_t)real_mode_start,
	    (caddr_t)((rm_platter_t *)rm_platter_va)->rm_code,
	    (size_t)real_mode_end - (size_t)real_mode_start);

	return (0);
}

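/*
 * Undo mach_cpucontext_init(): unmap the warm reset vector and drop the
 * identity mapping of the real mode platter page.
 */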
void
mach_cpucontext_fini(void)
{
	if (warm_reset_vector)
		psm_unmap_phys((caddr_t)warm_reset_vector,
		    sizeof (warm_reset_vector));
	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    HAT_UNLOAD);
}

#if defined(__amd64)
extern void *long_mode_64(void);
#endif	/* __amd64 */

void *
mach_cpucontext_alloc(struct cpu *cp)
{
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	struct cpu_tables *ct;
	struct tss *ntss;

	/*
	 * Allocate space for stack, tss, gdt and idt. We round the size
	 * allocated for cpu_tables up so that the TSS is on a unique page.
	 * This is more efficient when running in virtual machines.
	 */
	ct = kmem_zalloc(P2ROUNDUP(sizeof (*ct), PAGESIZE), KM_SLEEP);
	if ((uintptr_t)ct & PAGEOFFSET)
		panic("mp_startup_init: cpu%d misaligned tables", cp->cpu_id);

	ntss = cp->cpu_tss = &ct->ct_tss;

#if defined(__amd64)

	/*
	 * #DF (double fault).
	 */
	ntss->tss_ist1 = (uint64_t)&ct->ct_stack[sizeof (ct->ct_stack)];

#elif defined(__i386)

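	/*
	 * Populate the 32-bit TSS: the ring 0/1/2 stacks and stack
	 * selectors point at this cpu's startup stack with the kernel
	 * data selector, and eip and the segment registers are seeded
	 * from the cpu's startup thread and the kernel selectors.
	 */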
	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
	    (uint32_t)&ct->ct_stack[sizeof (ct->ct_stack)];

	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;

	ntss->tss_eip = (uint32_t)cp->cpu_thread->t_pc;

	ntss->tss_cs = KCS_SEL;
	ntss->tss_ds = ntss->tss_es = KDS_SEL;
	ntss->tss_fs = KFS_SEL;
	ntss->tss_gs = KGS_SEL;

#endif	/* __i386 */

	/*
	 * Set the I/O bit map offset to the size of the TSS segment, which
	 * places it past the segment limit.  There is then no I/O
	 * permission map, so all user I/O instructions generate a #GP
	 * fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);

	/*
	 * Setup kernel tss.
	 */
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) - 1, SDT_SYSTSS, SEL_KPL);

	/*
	 * Now copy all that we've set up onto the real mode platter
	 * for the real mode code to digest as part of starting the cpu.
	 */

	rm->rm_idt_base = cp->cpu_idt;
	rm->rm_idt_lim = sizeof (*cp->cpu_idt) * NIDT - 1;
	rm->rm_gdt_base = cp->cpu_gdt;
	rm->rm_gdt_lim = sizeof (*cp->cpu_gdt) * NGDT - 1;

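	/*
	 * The real mode startup code consumes these directly: cr3 and cr4
	 * so the new cpu comes up on the boot cpu's page tables with the
	 * same control register settings, plus the cpu id and feature
	 * word it should pick up.
	 */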
	rm->rm_pdbr = getcr3();
	rm->rm_cpu = cp->cpu_id;
	rm->rm_x86feature = x86_feature;
	rm->rm_cr4 = getcr4();

	rmp_gdt_init(rm);

	return (ct);
}

/*ARGSUSED*/
void
rmp_gdt_init(rm_platter_t *rm)
{

#if defined(__amd64)

	if (getcr3() > 0xffffffffUL)
		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
		    "located above 4G in physical memory (@ 0x%lx)", getcr3());

	/*
	 * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
	 * by code in real_mode_start():
	 *
	 * GDT[0]:  NULL selector
	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
	 *
	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
	 * a course of action as any other, though it may cause the entire
	 * platform to reset in some cases...
	 */
	rm->rm_temp_gdt[0] = 0;
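	/*
	 * 0x20980000000000 encodes exactly that code descriptor: base and
	 * limit of zero, access byte 0x98 (present, code segment) and the
	 * L (64-bit) flag set.
	 */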
	rm->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;

	rm->rm_temp_gdt_lim = (ushort_t)(sizeof (rm->rm_temp_gdt) - 1);
	rm->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)offsetof(rm_platter_t, rm_temp_gdt);
	rm->rm_temp_idt_lim = 0;
	rm->rm_temp_idt_base = 0;

	/*
	 * Since the CPU needs to jump to protected mode using an identity
	 * mapped address, we need to calculate it here.
	 */
	rm->rm_longmode64_addr = rm_platter_pa +
	    ((uint32_t)long_mode_64 - (uint32_t)real_mode_start);
#endif	/* __amd64 */
}

/*ARGSUSED*/
void
mach_cpucontext_free(struct cpu *cp, void *arg, int err)
{
	struct cpu_tables *ct = arg;

	ASSERT(&ct->ct_tss == cp->cpu_tss);

	switch (err) {
	case 0:
		break;
	case ETIMEDOUT:
		/*
		 * The processor was poked, but failed to start before
		 * we gave up waiting for it.  In case it starts later,
		 * don't free anything.
		 */
		break;
	default:
		/*
		 * Some other, passive error occurred; the cpu is not
		 * using this context, so it is safe to free.
		 */
		kmem_free(ct, P2ROUNDUP(sizeof (*ct), PAGESIZE));
		cp->cpu_tss = NULL;
		break;
	}
}

/*
 * "Enter monitor."  Called via cross-call from stop_other_cpus().
 */
void
mach_cpu_halt(char *msg)
{
	if (msg)
		prom_printf("%s\n", msg);

	/*CONSTANTCONDITION*/
	while (1)
		;
}

void
mach_cpu_idle(void)
{
	DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C1);

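	/*
	 * Tell the TLB shootdown code this cpu is going idle, halt until
	 * the next interrupt arrives, then service any TLB flushes that
	 * were posted while we were halted.
	 */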
	tlb_going_idle();
	i86_halt();
	tlb_service();

	DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C0);
}

void
mach_cpu_pause(volatile char *safe)
{
	/*
	 * This cpu is now safe.
	 */
	*safe = PAUSE_WAIT;
	membar_enter(); /* make sure stores are flushed */

	/*
	 * Now we wait.  When we are allowed to continue, safe
	 * will be set to PAUSE_IDLE.
	 */
	while (*safe != PAUSE_IDLE)
		SMT_PAUSE();
}

/*
 * Power on CPU.
 */
/*ARGSUSED*/
int
mp_cpu_poweron(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (ENOTSUP);		/* not supported */
}

/*
 * Power off CPU.
 */
/*ARGSUSED*/
int
mp_cpu_poweroff(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (ENOTSUP);		/* not supported */
}

/*
 * Return vcpu state.  Since this could be a virtual environment that we
 * are unaware of, return "unknown".
 */
/* ARGSUSED */
int
vcpu_on_pcpu(processorid_t cpu)
{
	return (VCPU_STATE_UNKNOWN);
}