/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

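/*
 * Per-CPU data pointer for the AP that is currently being started.  The BSP
 * points this at the new CPU's pcpu area before releasing it; the AP installs
 * it in SPRG0 in cpudep_ap_early_bootstrap().
 */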
void *ap_pcpu;

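/*
 * HID and cache-control register values captured from the BSP so that APs
 * can be configured to match.  On 32-bit kernels each 64-bit SPR of the 970
 * occupies two adjacent entries, high word first.
 */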
static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);

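/*
 * First C code executed on an AP: install the per-CPU pointer in SPRG0 and,
 * on 970-class CPUs, restore HID4/HID5 before the MMU is relied upon.
 */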
void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	}
}

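/*
 * Finish bringing up an AP: run with external interrupts disabled, make the
 * idle thread current and return its stack pointer so the bootstrap code can
 * switch onto it.
 */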
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = PSL_KERNSET & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
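	/*
	 * Load curthread into the register the kernel reserves for it:
	 * r13 on powerpc64, r2 on 32-bit kernels.
	 */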
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	return (sp);
}

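/*
 * Enable the L2 cache on an MPC74xx AP using the L2CR value saved from the
 * BSP.  The global-invalidate-in-progress bit differs between parts: the
 * 7400/7410 report it in L2CR[L2IP], later parts in L2CR[L2I].
 */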
static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t	vers;

	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}

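/*
 * Enable the L3 cache on MPC745x parts: write the configuration with the
 * L3E, L3I, L3PE and L3CLKEN bits clear, start the cache clock, perform a
 * global invalidate, cycle L3CLKEN and only then set L3CR[L3E].
 */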
static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return (ccr);
}

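/*
 * The two helpers below enable the L1 data and instruction caches, flash
 * invalidating them as they are switched on so no stale contents survive.
 */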
static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
	hid |= HID0_DCE;
	powerpc_sync();
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}

static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}

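/*
 * SYSINIT hook run on the BSP during boot: snapshot the HID and
 * cache-control registers so that APs brought up later can be given
 * identical settings.
 */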
static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}

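/*
 * CPU-model-specific setup run on each AP: reset the timebase where the
 * kernel is permitted to write it, then reload the register state captured
 * by cpudep_save_config().
 */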
void
cpudep_ap_setup()
{
	register_t	reg;
	uint16_t	vers;

	vers = mfpvr() >> 16;

	/* The following is needed for restoring from sleep. */
#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(0);
#else
	mttb(0);
#endif
	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Set HIOR to 0 */
		__asm __volatile("mtspr 311,%0" :: "r"(0));
		powerpc_sync();

		/*
		 * The 970 has strange rules about how to update HID registers.
		 * See Table 2-3, 970MP manual
		 *
		 * Note: HID4 and HID5 restored already in
		 * cpudep_ap_early_bootstrap()
		 */

		__asm __volatile("mtasr %0; sync" :: "r"(0));
	#ifdef __powerpc64__
		__asm __volatile(" \
			sync; isync;					\
			mtspr	%1, %0;					\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
			sync; isync"
		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
		__asm __volatile("sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
	#else
		__asm __volatile(" \
			ld	%0,0(%2);				\
			sync; isync;					\
			mtspr	%1, %0;					\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
			sync; isync"
		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
		__asm __volatile("ld %0, 8(%2); sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
	#endif

		powerpc_sync();
		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_HID0, bsp_state[0]);
			mtspr(SPR_HID1, bsp_state[1]);
			mtspr(SPR_HID4, bsp_state[2]);
			mtspr(SPR_HID6, bsp_state[3]);

			mtspr(SPR_CELL_TSCR, bsp_state[4]);
		}
		#endif

		mtspr(SPR_CELL_TSRL, bsp_state[5]);

		break;
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* XXX: Program the CPU ID into PIR */
		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));

		powerpc_sync();
		isync();

		mtspr(SPR_HID0, bsp_state[0]); isync();
		mtspr(SPR_HID1, bsp_state[1]); isync();

		/* Now enable the L3 cache. */
		switch (vers) {
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/* Only MPC745x CPUs have an L3 cache. */
			reg = mpc745x_l3_enable(bsp_state[3]);
		default:
			break;
		}

		reg = mpc74xx_l2_enable(bsp_state[2]);
		reg = mpc74xx_l1d_enable();
		reg = mpc74xx_l1i_enable();

		break;
	default:
#ifdef __powerpc64__
		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
			break;
#endif
		printf("WARNING: Unknown CPU type. Cache performance may be "
		    "suboptimal.\n");
		break;
	}
}