/* xref: /freebsd/sys/powerpc/aim/mp_cpudep.c (revision 2a2234c0f41da33b8cfc938e46b54a8234b64135) */
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Marcel Moolenaar
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/pcpu.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 
40 #include <machine/bus.h>
41 #include <machine/cpu.h>
42 #include <machine/hid.h>
43 #include <machine/intr_machdep.h>
44 #include <machine/pcb.h>
45 #include <machine/psl.h>
46 #include <machine/smp.h>
47 #include <machine/spr.h>
48 #include <machine/trap.h>
49 
50 #include <dev/ofw/openfirm.h>
51 #include <machine/ofw_machdep.h>
52 
/* Per-CPU data pointer handed to the next AP being started; the AP loads
 * it into SPRG0 in cpudep_ap_early_bootstrap(). */
void *ap_pcpu;

/* Snapshot of the BSP's configuration registers (HIDs, L2CR/L3CR, ...),
 * filled by cpudep_save_config() and replayed on each AP.  8-byte aligned
 * so 32-bit kernels can load paired slots with 64-bit 'ld'. */
static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
/* Capture the BSP state once, early in boot, before any AP is released. */
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
59 
/*
 * First C code run on a freshly-released AP.  Restores only the state
 * needed to reach cpudep_ap_bootstrap(): the MMU-critical HID registers
 * on 970-class CPUs, LPID/LPCR on bare-metal POWER8, and SPRG0 (the
 * per-CPU data pointer).
 */
void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		/*
		 * 32-bit kernel on a 64-bit 970: HID4/HID5 are 64 bits
		 * wide, so reload each with a 64-bit 'ld' from the pair
		 * of 32-bit bsp_state[] slots saved by
		 * cpudep_save_config() (byte offsets 16 and 24, i.e.
		 * bsp_state[4..5] and bsp_state[6..7]).
		 */
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
#ifdef __powerpc64__
		/* Only meaningful when running in hypervisor mode. */
		if (mfmsr() & PSL_HV) {
			isync();
			/*
			 * Direct interrupts to SRR instead of HSRR and
			 * reset LPCR otherwise
			 */
			mtspr(SPR_LPID, 0);
			isync();

			mtspr(SPR_LPCR, LPCR_LPES);
			isync();
		}
#endif
		break;
	}

	/* SPRG0 holds the per-CPU data pointer for this CPU. */
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}
108 
/*
 * Second-stage AP bootstrap.  Installs the kernel MSR (with external
 * interrupts masked), makes this CPU's idle thread the current thread
 * (mirroring it into the dedicated curthread register), and returns the
 * idle thread's saved stack pointer for the caller to switch onto.
 */
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	/* Standard kernel MSR, but keep external interrupts disabled. */
	msr = psl_kernset & ~PSL_EE;
	mtmsr(msr);

	/* The AP starts life running its per-CPU idle thread. */
	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	/* r13 is the curthread register on 64-bit powerpc. */
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	/* r2 is the curthread register on 32-bit powerpc. */
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	return (sp);
}
128 
129 static register_t
130 mpc74xx_l2_enable(register_t l2cr_config)
131 {
132 	register_t ccr, bit;
133 	uint16_t	vers;
134 
135 	vers = mfpvr() >> 16;
136 	switch (vers) {
137 	case MPC7400:
138 	case MPC7410:
139 		bit = L2CR_L2IP;
140 		break;
141 	default:
142 		bit = L2CR_L2I;
143 		break;
144 	}
145 
146 	ccr = mfspr(SPR_L2CR);
147 	if (ccr & L2CR_L2E)
148 		return (ccr);
149 
150 	/* Configure L2 cache. */
151 	ccr = l2cr_config & ~L2CR_L2E;
152 	mtspr(SPR_L2CR, ccr | L2CR_L2I);
153 	do {
154 		ccr = mfspr(SPR_L2CR);
155 	} while (ccr & bit);
156 	powerpc_sync();
157 	mtspr(SPR_L2CR, l2cr_config);
158 	powerpc_sync();
159 
160 	return (l2cr_config);
161 }
162 
/*
 * Enable the external L3 cache on MPC745x CPUs from the saved BSP
 * configuration.  The write/sync/DELAY choreography below follows the
 * documented 745x L3 enable sequence; do not reorder these statements.
 * Returns the resulting L3CR value (unchanged if already enabled).
 */
static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);	/* Already enabled. */

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	/* Global invalidate; hardware clears L3I when it is done. */
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	/* Pulse the L3 clock enable off and back on before enabling. */
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return(ccr);
}
194 
195 static register_t
196 mpc74xx_l1d_enable(void)
197 {
198 	register_t hid;
199 
200 	hid = mfspr(SPR_HID0);
201 	if (hid & HID0_DCE)
202 		return (hid);
203 
204 	/* Enable L1 D-cache */
205 	hid |= HID0_DCE;
206 	powerpc_sync();
207 	mtspr(SPR_HID0, hid | HID0_DCFI);
208 	powerpc_sync();
209 
210 	return (hid);
211 }
212 
213 static register_t
214 mpc74xx_l1i_enable(void)
215 {
216 	register_t hid;
217 
218 	hid = mfspr(SPR_HID0);
219 	if (hid & HID0_ICE)
220 		return (hid);
221 
222 	/* Enable L1 I-cache */
223 	hid |= HID0_ICE;
224 	isync();
225 	mtspr(SPR_HID0, hid | HID0_ICFI);
226 	isync();
227 
228 	return (hid);
229 }
230 
/*
 * SYSINIT hook: runs once on the BSP to snapshot its model-specific
 * configuration registers into bsp_state[], so the APs can be brought
 * up with identical settings by cpudep_ap_early_bootstrap() and
 * cpudep_ap_setup().
 */
static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch(vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
		/*
		 * 32-bit kernel on a 64-bit 970: each 64-bit HID is
		 * split across a pair of 32-bit bsp_state[] slots, high
		 * word (after srdi 32) in the even slot, low word in
		 * the odd one, matching the 'ld' offsets used by the
		 * restore paths.
		 */
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}
293 
294 void
295 cpudep_ap_setup()
296 {
297 	register_t	reg;
298 	uint16_t	vers;
299 
300 	vers = mfpvr() >> 16;
301 
302 	/* The following is needed for restoring from sleep. */
303 	platform_smp_timebase_sync(0, 1);
304 
305 	switch(vers) {
306 	case IBM970:
307 	case IBM970FX:
308 	case IBM970MP:
309 		/* Set HIOR to 0 */
310 		__asm __volatile("mtspr 311,%0" :: "r"(0));
311 		powerpc_sync();
312 
313 		/*
314 		 * The 970 has strange rules about how to update HID registers.
315 		 * See Table 2-3, 970MP manual
316 		 *
317 		 * Note: HID4 and HID5 restored already in
318 		 * cpudep_ap_early_bootstrap()
319 		 */
320 
321 		__asm __volatile("mtasr %0; sync" :: "r"(0));
322 	#ifdef __powerpc64__
323 		__asm __volatile(" \
324 			sync; isync;					\
325 			mtspr	%1, %0;					\
326 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
327 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
328 			sync; isync"
329 		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
330 		__asm __volatile("sync; isync;	\
331 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
332 		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
333 	#else
334 		__asm __volatile(" \
335 			ld	%0,0(%2);				\
336 			sync; isync;					\
337 			mtspr	%1, %0;					\
338 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
339 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
340 			sync; isync"
341 		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
342 		__asm __volatile("ld %0, 8(%2); sync; isync;	\
343 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
344 		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
345 	#endif
346 
347 		powerpc_sync();
348 		break;
349 	case IBMCELLBE:
350 		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
351 		if (mfmsr() & PSL_HV) {
352 			mtspr(SPR_HID0, bsp_state[0]);
353 			mtspr(SPR_HID1, bsp_state[1]);
354 			mtspr(SPR_HID4, bsp_state[2]);
355 			mtspr(SPR_HID6, bsp_state[3]);
356 
357 			mtspr(SPR_CELL_TSCR, bsp_state[4]);
358 		}
359 		#endif
360 
361 		mtspr(SPR_CELL_TSRL, bsp_state[5]);
362 
363 		break;
364 	case MPC7400:
365 	case MPC7410:
366 	case MPC7447A:
367 	case MPC7448:
368 	case MPC7450:
369 	case MPC7455:
370 	case MPC7457:
371 		/* XXX: Program the CPU ID into PIR */
372 		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
373 
374 		powerpc_sync();
375 		isync();
376 
377 		mtspr(SPR_HID0, bsp_state[0]); isync();
378 		mtspr(SPR_HID1, bsp_state[1]); isync();
379 
380 		/* Now enable the L3 cache. */
381 		switch (vers) {
382 		case MPC7450:
383 		case MPC7455:
384 		case MPC7457:
385 			/* Only MPC745x CPUs have an L3 cache. */
386 			reg = mpc745x_l3_enable(bsp_state[3]);
387 		default:
388 			break;
389 		}
390 
391 		reg = mpc74xx_l2_enable(bsp_state[2]);
392 		reg = mpc74xx_l1d_enable();
393 		reg = mpc74xx_l1i_enable();
394 
395 		break;
396 	case IBMPOWER7:
397 	case IBMPOWER7PLUS:
398 	case IBMPOWER8:
399 	case IBMPOWER8E:
400 #ifdef __powerpc64__
401 		if (mfmsr() & PSL_HV) {
402 			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_LPES |
403 			    LPCR_PECE_WAKESET);
404 			isync();
405 		}
406 #endif
407 		break;
408 	default:
409 #ifdef __powerpc64__
410 		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
411 			break;
412 #endif
413 		printf("WARNING: Unknown CPU type. Cache performace may be "
414 		    "suboptimal.\n");
415 		break;
416 	}
417 }
418 
419