xref: /freebsd/sys/powerpc/aim/mp_cpudep.c (revision c0ce6f7d91bee6ac83c799e4a5574bd340f37f67)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Marcel Moolenaar
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/pcpu.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 
40 #include <machine/bus.h>
41 #include <machine/cpu.h>
42 #include <machine/hid.h>
43 #include <machine/intr_machdep.h>
44 #include <machine/pcb.h>
45 #include <machine/psl.h>
46 #include <machine/smp.h>
47 #include <machine/spr.h>
48 #include <machine/trap.h>
49 
50 #include <dev/ofw/openfirm.h>
51 #include <machine/ofw_machdep.h>
52 
/*
 * Pointer to the pcpu structure of the AP currently being started; the
 * BSP publishes it here and the AP loads it into SPRG0 in
 * cpudep_ap_early_bootstrap().
 */
void *ap_pcpu;

/*
 * Snapshot of the BSP's HID/cache SPRs taken at boot by
 * cpudep_save_config() and replayed on the APs.  Eight slots (8-byte
 * aligned) so that a 32-bit kernel can store four 64-bit SPR values as
 * high/low word pairs.
 */
static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
59 
/*
 * Earliest CPU-dependent AP entry hook, run before the AP has a usable
 * pcpu area.  Restores the HID4/HID5 snapshot on 970-class CPUs (needed
 * before the MMU can be used), resets hypervisor interrupt routing on
 * POWER8/POWER9 when running in HV mode, and finally loads SPRG0 with
 * the pcpu pointer the BSP published in ap_pcpu.
 */
void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		/*
		 * 32-bit kernel on a 64-bit 970: cpudep_save_config() stored
		 * HID4/HID5 as high/low word pairs at bsp_state[4..5] (byte
		 * offset 16) and bsp_state[6..7] (offset 24).  Use the
		 * 64-bit 'ld' instruction — valid because the CPU itself is
		 * 64-bit — to reassemble each value before the mtspr.
		 */
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			isync();
			/*
			 * Direct interrupts to SRR instead of HSRR and
			 * reset LPCR otherwise
			 */
			mtspr(SPR_LPID, 0);
			isync();

			mtspr(SPR_LPCR, lpcr);
			isync();

			/*
			 * Nuke FSCR, to be managed on a per-process basis
			 * later.
			 */
			mtspr(SPR_FSCR, 0);
		}
#endif
		break;
	}

	/* Make the BSP-supplied pcpu pointer available via SPRG0. */
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}
116 
/*
 * Final CPU-dependent AP bootstrap, run once the AP can execute C code.
 * Sets the kernel MSR (external interrupts masked), installs the idle
 * thread as curthread, points the per-CPU thread register at it (r13 on
 * 64-bit, r2 on 32-bit), and returns the idle thread's saved stack
 * pointer for the AP to switch onto.
 */
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = psl_kernset & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	return (sp);
}
136 
137 static register_t
138 mpc74xx_l2_enable(register_t l2cr_config)
139 {
140 	register_t ccr, bit;
141 	uint16_t	vers;
142 
143 	vers = mfpvr() >> 16;
144 	switch (vers) {
145 	case MPC7400:
146 	case MPC7410:
147 		bit = L2CR_L2IP;
148 		break;
149 	default:
150 		bit = L2CR_L2I;
151 		break;
152 	}
153 
154 	ccr = mfspr(SPR_L2CR);
155 	if (ccr & L2CR_L2E)
156 		return (ccr);
157 
158 	/* Configure L2 cache. */
159 	ccr = l2cr_config & ~L2CR_L2E;
160 	mtspr(SPR_L2CR, ccr | L2CR_L2I);
161 	do {
162 		ccr = mfspr(SPR_L2CR);
163 	} while (ccr & bit);
164 	powerpc_sync();
165 	mtspr(SPR_L2CR, l2cr_config);
166 	powerpc_sync();
167 
168 	return (l2cr_config);
169 }
170 
/*
 * Enable the external L3 cache on MPC745x CPUs using the supplied L3CR
 * configuration.  If the cache is already enabled the current L3CR is
 * returned as-is.  The exact write/delay sequence below follows the
 * documented bring-up procedure (clock enable, global invalidate, clock
 * cycle, then enable) and must not be reordered.
 */
static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	/* Global invalidate; L3I clears itself when the invalidate is done. */
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	/* Cycle the L3 clock: disable, settle, re-enable, settle. */
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	/* Finally turn the cache on. */
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return(ccr);
}
202 
203 static register_t
204 mpc74xx_l1d_enable(void)
205 {
206 	register_t hid;
207 
208 	hid = mfspr(SPR_HID0);
209 	if (hid & HID0_DCE)
210 		return (hid);
211 
212 	/* Enable L1 D-cache */
213 	hid |= HID0_DCE;
214 	powerpc_sync();
215 	mtspr(SPR_HID0, hid | HID0_DCFI);
216 	powerpc_sync();
217 
218 	return (hid);
219 }
220 
221 static register_t
222 mpc74xx_l1i_enable(void)
223 {
224 	register_t hid;
225 
226 	hid = mfspr(SPR_HID0);
227 	if (hid & HID0_ICE)
228 		return (hid);
229 
230 	/* Enable L1 I-cache */
231 	hid |= HID0_ICE;
232 	isync();
233 	mtspr(SPR_HID0, hid | HID0_ICFI);
234 	isync();
235 
236 	return (hid);
237 }
238 
/*
 * SYSINIT hook run on the BSP at boot: snapshot the CPU-model-specific
 * HID/cache SPRs into bsp_state[] so cpudep_ap_early_bootstrap() and
 * cpudep_ap_setup() can replay them on each AP.
 */
static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch(vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		/* 64-bit kernel: one 64-bit slot per SPR. */
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
		/*
		 * 32-bit kernel on a 64-bit 970: register_t is 32 bits, so
		 * split each 64-bit SPR into a pair of slots — srdi leaves
		 * the high word in the even slot, mr the low word in the
		 * odd one.  Layout: [0..1]=HID0, [2..3]=HID1, [4..5]=HID4,
		 * [6..7]=HID5 (the offsets the 'ld's in
		 * cpudep_ap_early_bootstrap() rely on).
		 */
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}
301 
302 void
303 cpudep_ap_setup()
304 {
305 	register_t	reg;
306 	uint16_t	vers;
307 
308 	vers = mfpvr() >> 16;
309 
310 	/* The following is needed for restoring from sleep. */
311 	platform_smp_timebase_sync(0, 1);
312 
313 	switch(vers) {
314 	case IBM970:
315 	case IBM970FX:
316 	case IBM970MP:
317 		/* Set HIOR to 0 */
318 		__asm __volatile("mtspr 311,%0" :: "r"(0));
319 		powerpc_sync();
320 
321 		/*
322 		 * The 970 has strange rules about how to update HID registers.
323 		 * See Table 2-3, 970MP manual
324 		 *
325 		 * Note: HID4 and HID5 restored already in
326 		 * cpudep_ap_early_bootstrap()
327 		 */
328 
329 		__asm __volatile("mtasr %0; sync" :: "r"(0));
330 	#ifdef __powerpc64__
331 		__asm __volatile(" \
332 			sync; isync;					\
333 			mtspr	%1, %0;					\
334 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
335 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
336 			sync; isync"
337 		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
338 		__asm __volatile("sync; isync;	\
339 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
340 		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
341 	#else
342 		__asm __volatile(" \
343 			ld	%0,0(%2);				\
344 			sync; isync;					\
345 			mtspr	%1, %0;					\
346 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
347 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
348 			sync; isync"
349 		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
350 		__asm __volatile("ld %0, 8(%2); sync; isync;	\
351 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
352 		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
353 	#endif
354 
355 		powerpc_sync();
356 		break;
357 	case IBMCELLBE:
358 		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
359 		if (mfmsr() & PSL_HV) {
360 			mtspr(SPR_HID0, bsp_state[0]);
361 			mtspr(SPR_HID1, bsp_state[1]);
362 			mtspr(SPR_HID4, bsp_state[2]);
363 			mtspr(SPR_HID6, bsp_state[3]);
364 
365 			mtspr(SPR_CELL_TSCR, bsp_state[4]);
366 		}
367 		#endif
368 
369 		mtspr(SPR_CELL_TSRL, bsp_state[5]);
370 
371 		break;
372 	case MPC7400:
373 	case MPC7410:
374 	case MPC7447A:
375 	case MPC7448:
376 	case MPC7450:
377 	case MPC7455:
378 	case MPC7457:
379 		/* XXX: Program the CPU ID into PIR */
380 		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
381 
382 		powerpc_sync();
383 		isync();
384 
385 		mtspr(SPR_HID0, bsp_state[0]); isync();
386 		mtspr(SPR_HID1, bsp_state[1]); isync();
387 
388 		/* Now enable the L3 cache. */
389 		switch (vers) {
390 		case MPC7450:
391 		case MPC7455:
392 		case MPC7457:
393 			/* Only MPC745x CPUs have an L3 cache. */
394 			reg = mpc745x_l3_enable(bsp_state[3]);
395 		default:
396 			break;
397 		}
398 
399 		reg = mpc74xx_l2_enable(bsp_state[2]);
400 		reg = mpc74xx_l1d_enable();
401 		reg = mpc74xx_l1i_enable();
402 
403 		break;
404 	case IBMPOWER7:
405 	case IBMPOWER7PLUS:
406 	case IBMPOWER8:
407 	case IBMPOWER8E:
408 	case IBMPOWER8NVL:
409 	case IBMPOWER9:
410 #ifdef __powerpc64__
411 		if (mfmsr() & PSL_HV) {
412 			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
413 			    LPCR_PECE_WAKESET);
414 			isync();
415 		}
416 #endif
417 		break;
418 	default:
419 #ifdef __powerpc64__
420 		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
421 			break;
422 #endif
423 		printf("WARNING: Unknown CPU type. Cache performace may be "
424 		    "suboptimal.\n");
425 		break;
426 	}
427 }
428 
429