xref: /freebsd/sys/powerpc/aim/mp_cpudep.c (revision e0656a491411fe65ed8b9135add026358b24951f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Marcel Moolenaar
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/pcpu.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 
40 #include <machine/bus.h>
41 #include <machine/cpu.h>
42 #include <machine/hid.h>
43 #include <machine/intr_machdep.h>
44 #include <machine/pcb.h>
45 #include <machine/psl.h>
46 #include <machine/smp.h>
47 #include <machine/spr.h>
48 #include <machine/trap.h>
49 
50 #include <dev/ofw/openfirm.h>
51 #include <machine/ofw_machdep.h>
52 
/* Pointer to the pcpu structure for the AP currently being started. */
void *ap_pcpu;

/*
 * Snapshot of the BSP's CPU-configuration registers, taken at boot by
 * cpudep_save_config() and replayed onto each AP.  The slot layout is
 * CPU-family specific; it must stay in sync with the consumers in
 * cpudep_ap_early_bootstrap() and cpudep_ap_setup().  8-byte alignment
 * lets 32-bit kernels store/load 64-bit HID values with 'std'/'ld'.
 */
static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
/* Capture the BSP register state once the CPU subsystem is up. */
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
59 
/*
 * Earliest C code run on an AP, before the MMU and normal kernel
 * environment are usable.  Restores the minimum per-family CPU state
 * (HID4/HID5 on 970-class CPUs, LPID/LPCR on POWER8) and points SPRG0
 * at this CPU's pcpu area so PCPU accesses work from here on.
 */
void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		/*
		 * 32-bit kernel on a 64-bit CPU: cpudep_save_config() split
		 * HID4/HID5 across bsp_state[4..5] and [6..7] (byte offsets
		 * 16 and 24), so reassemble each 64-bit value with 'ld'
		 * before the mtspr; sync/isync bracket every SPR update as
		 * the 970 requires.
		 */
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
		isync();
		/* Direct interrupts to SRR instead of HSRR and reset LPCR otherwise */
		mtspr(SPR_LPID, 0);
		isync();

		mtspr(SPR_LPCR, LPCR_LPES);
		isync();
		break;
	}

	/* SPRG0 carries the pcpu pointer; set it before any PCPU use. */
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}
101 
/*
 * Second-stage AP bootstrap: install the kernel MSR (external
 * interrupts still masked), make the idle thread this CPU's curthread,
 * load the per-thread pointer register, and hand the idle thread's
 * stack pointer back to the asm trampoline so it can switch stacks.
 */
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	/* Kernel-mode MSR with EE clear: no interrupts until scheduling. */
	msr = PSL_KERNSET & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	/* r13 is the dedicated curthread register in the 64-bit ABI. */
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	/* r2 plays that role on 32-bit. */
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	/* The asm caller switches to this stack pointer. */
	return (sp);
}
121 
/*
 * Enable the L2 cache on an MPC74xx-family AP using the L2CR value
 * saved from the BSP.  Returns the resulting L2CR contents; if the
 * cache is already enabled the current value is returned unchanged.
 */
static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t	vers;

	/*
	 * MPC7400/7410 signal invalidate-in-progress via L2IP; later
	 * family members use L2I itself as the busy indicator.
	 */
	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	/* Global-invalidate while still disabled, then poll for completion. */
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}
155 
/*
 * Enable the L3 cache on an MPC745x AP using the L3CR value saved from
 * the BSP.  Follows the documented step-by-step enable sequence
 * (configure, clock-enable, global invalidate, clock toggle, enable);
 * the ordering and DELAY()s are required by the hardware.  Returns the
 * final L3CR contents, or the current value if already enabled.
 */
static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	/* Global invalidate; poll until the hardware clears L3I. */
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	/* Toggle the clock enable around the final configuration write. */
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return(ccr);
}
187 
/*
 * Enable the L1 data cache on an MPC74xx AP, invalidating it in the
 * same HID0 write (DCFI).  Returns the resulting HID0 value, or the
 * current one if the D-cache is already enabled.
 */
static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
	hid |= HID0_DCE;
	powerpc_sync();
	/* DCFI flash-invalidates the cache as it is enabled. */
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}
205 
/*
 * Enable the L1 instruction cache on an MPC74xx AP, invalidating it in
 * the same HID0 write (ICFI).  Returns the resulting HID0 value, or
 * the current one if the I-cache is already enabled.
 */
static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	/* ICFI flash-invalidates the cache as it is enabled. */
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}
223 
/*
 * SYSINIT hook (runs on the BSP): snapshot the CPU-configuration
 * registers into bsp_state[] so the APs can replicate them when they
 * boot.  The slot assignment is CPU-family specific and must match
 * cpudep_ap_early_bootstrap() and cpudep_ap_setup().
 */
static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch(vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		/* 64-bit kernel: one register_t per 64-bit HID register. */
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
		/*
		 * 32-bit kernel on a 64-bit CPU: split each 64-bit HID
		 * value across two consecutive 32-bit slots, high word in
		 * the even index (so HID4 sits at byte offset 16, HID5 at
		 * 24 — see the 'ld' loads in cpudep_ap_early_bootstrap()).
		 */
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		/* HID registers are hypervisor resources on Cell. */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}
286 
287 void
288 cpudep_ap_setup()
289 {
290 	register_t	reg;
291 	uint16_t	vers;
292 
293 	vers = mfpvr() >> 16;
294 
295 	/* The following is needed for restoring from sleep. */
296 	platform_smp_timebase_sync(0, 1);
297 
298 	switch(vers) {
299 	case IBM970:
300 	case IBM970FX:
301 	case IBM970MP:
302 		/* Set HIOR to 0 */
303 		__asm __volatile("mtspr 311,%0" :: "r"(0));
304 		powerpc_sync();
305 
306 		/*
307 		 * The 970 has strange rules about how to update HID registers.
308 		 * See Table 2-3, 970MP manual
309 		 *
310 		 * Note: HID4 and HID5 restored already in
311 		 * cpudep_ap_early_bootstrap()
312 		 */
313 
314 		__asm __volatile("mtasr %0; sync" :: "r"(0));
315 	#ifdef __powerpc64__
316 		__asm __volatile(" \
317 			sync; isync;					\
318 			mtspr	%1, %0;					\
319 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
320 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
321 			sync; isync"
322 		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
323 		__asm __volatile("sync; isync;	\
324 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
325 		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
326 	#else
327 		__asm __volatile(" \
328 			ld	%0,0(%2);				\
329 			sync; isync;					\
330 			mtspr	%1, %0;					\
331 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
332 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
333 			sync; isync"
334 		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
335 		__asm __volatile("ld %0, 8(%2); sync; isync;	\
336 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
337 		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
338 	#endif
339 
340 		powerpc_sync();
341 		break;
342 	case IBMCELLBE:
343 		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
344 		if (mfmsr() & PSL_HV) {
345 			mtspr(SPR_HID0, bsp_state[0]);
346 			mtspr(SPR_HID1, bsp_state[1]);
347 			mtspr(SPR_HID4, bsp_state[2]);
348 			mtspr(SPR_HID6, bsp_state[3]);
349 
350 			mtspr(SPR_CELL_TSCR, bsp_state[4]);
351 		}
352 		#endif
353 
354 		mtspr(SPR_CELL_TSRL, bsp_state[5]);
355 
356 		break;
357 	case MPC7400:
358 	case MPC7410:
359 	case MPC7447A:
360 	case MPC7448:
361 	case MPC7450:
362 	case MPC7455:
363 	case MPC7457:
364 		/* XXX: Program the CPU ID into PIR */
365 		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
366 
367 		powerpc_sync();
368 		isync();
369 
370 		mtspr(SPR_HID0, bsp_state[0]); isync();
371 		mtspr(SPR_HID1, bsp_state[1]); isync();
372 
373 		/* Now enable the L3 cache. */
374 		switch (vers) {
375 		case MPC7450:
376 		case MPC7455:
377 		case MPC7457:
378 			/* Only MPC745x CPUs have an L3 cache. */
379 			reg = mpc745x_l3_enable(bsp_state[3]);
380 		default:
381 			break;
382 		}
383 
384 		reg = mpc74xx_l2_enable(bsp_state[2]);
385 		reg = mpc74xx_l1d_enable();
386 		reg = mpc74xx_l1i_enable();
387 
388 		break;
389 	case IBMPOWER7:
390 	case IBMPOWER7PLUS:
391 	case IBMPOWER8:
392 	case IBMPOWER8E:
393 #ifdef __powerpc64__
394 		if (mfmsr() & PSL_HV)
395 			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_LPES);
396 #endif
397 		break;
398 	default:
399 #ifdef __powerpc64__
400 		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
401 			break;
402 #endif
403 		printf("WARNING: Unknown CPU type. Cache performace may be "
404 		    "suboptimal.\n");
405 		break;
406 	}
407 }
408 
409