xref: /freebsd/sys/powerpc/aim/mp_cpudep.c (revision 97cb52fa9aefd90fad38790fded50905aeeb9b9e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Marcel Moolenaar
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/pcpu.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 
40 #include <machine/bus.h>
41 #include <machine/cpu.h>
42 #include <machine/hid.h>
43 #include <machine/intr_machdep.h>
44 #include <machine/pcb.h>
45 #include <machine/psl.h>
46 #include <machine/smp.h>
47 #include <machine/spr.h>
48 #include <machine/trap.h>
49 
50 #include <dev/ofw/openfirm.h>
51 #include <machine/ofw_machdep.h>
52 
/* Pointer to the pcpu area of the AP currently being started (set by BSP). */
void *ap_pcpu;

/*
 * Snapshot of the BSP's HID/cache-control SPRs, captured at boot by
 * cpudep_save_config() and replayed onto each AP in cpudep_ap_setup().
 * 8-byte aligned so the 32-bit 970 path can access it with 64-bit loads.
 * Note the meaning of each slot varies by CPU family (see the switch
 * statements below).
 */
static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
59 
/*
 * Earliest AP entry point reachable from C: publishes the pcpu pointer
 * in SPRG0 and, on 970-class CPUs, restores HID4/HID5 from the values
 * the BSP saved, since those registers must be valid before the MMU
 * can be brought up on this CPU.
 */
void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	/* Make curcpu/pcpup work: stash the pcpu pointer in SPRG0. */
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		/*
		 * 32-bit kernel on the 64-bit 970: `ld' reassembles each
		 * 64-bit HID value from the pair of 32-bit halves that
		 * cpudep_save_config() stored at bsp_state[4..5] (HID4,
		 * byte offset 16) and bsp_state[6..7] (HID5, offset 24).
		 */
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	}
}
91 
/*
 * Main AP bootstrap: establishes the kernel MSR (interrupts still
 * masked), installs the idle thread as this CPU's curthread, and
 * returns the stack pointer the AP should switch onto.
 */
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	/* Kernel execution environment, with external interrupts disabled. */
	msr = PSL_KERNSET & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
	/* Load the ABI-defined curthread register: r13 (64-bit) / r2 (32-bit). */
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	return (sp);
}
111 
/*
 * Enable the L2 cache on a 74xx-class CPU using the given L2CR
 * configuration (normally the value saved from the BSP).  If the cache
 * is already enabled, the current L2CR is returned unchanged; otherwise
 * the cache is globally invalidated, then enabled, and the applied
 * configuration is returned.
 */
static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t	vers;

	/*
	 * The "invalidate in progress" indicator differs by generation:
	 * MPC7400/7410 report it in L2IP; later parts hold L2I set until
	 * the invalidate completes.
	 */
	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	/* Kick off a global invalidate and poll until it finishes. */
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}
145 
/*
 * Enable the L3 cache on an MPC745x CPU using the given L3CR
 * configuration.  If the cache is already enabled, the current L3CR is
 * returned unchanged.  The clock-enable / invalidate choreography below
 * follows the documented bring-up sequence; the statement order and
 * delays are required by the hardware — do not reorder.
 */
static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	/* Global invalidate; L3I clears itself when the invalidate is done. */
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	/* Pulse the clock enable off and back on before finally enabling. */
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return(ccr);
}
177 
178 static register_t
179 mpc74xx_l1d_enable(void)
180 {
181 	register_t hid;
182 
183 	hid = mfspr(SPR_HID0);
184 	if (hid & HID0_DCE)
185 		return (hid);
186 
187 	/* Enable L1 D-cache */
188 	hid |= HID0_DCE;
189 	powerpc_sync();
190 	mtspr(SPR_HID0, hid | HID0_DCFI);
191 	powerpc_sync();
192 
193 	return (hid);
194 }
195 
196 static register_t
197 mpc74xx_l1i_enable(void)
198 {
199 	register_t hid;
200 
201 	hid = mfspr(SPR_HID0);
202 	if (hid & HID0_ICE)
203 		return (hid);
204 
205 	/* Enable L1 I-cache */
206 	hid |= HID0_ICE;
207 	isync();
208 	mtspr(SPR_HID0, hid | HID0_ICFI);
209 	isync();
210 
211 	return (hid);
212 }
213 
/*
 * SYSINIT hook, run on the BSP at SI_SUB_CPU time: saves the HID and
 * cache-control SPR values that cpudep_ap_early_bootstrap() and
 * cpudep_ap_setup() later replay onto each AP.  The bsp_state[] layout
 * is CPU-family specific and must stay in sync with those consumers.
 */
static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch(vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
		/*
		 * 32-bit kernel on the 64-bit 970: register_t is 32 bits,
		 * so each 64-bit SPR is split into two consecutive slots
		 * (high word first via srdi, low word via mr), giving the
		 * same in-memory doubleword a 64-bit `ld' reads back later.
		 */
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}
276 
277 void
278 cpudep_ap_setup()
279 {
280 	register_t	reg;
281 	uint16_t	vers;
282 
283 	vers = mfpvr() >> 16;
284 
285 	/* The following is needed for restoring from sleep. */
286 	platform_smp_timebase_sync(0, 1);
287 
288 	switch(vers) {
289 	case IBM970:
290 	case IBM970FX:
291 	case IBM970MP:
292 		/* Set HIOR to 0 */
293 		__asm __volatile("mtspr 311,%0" :: "r"(0));
294 		powerpc_sync();
295 
296 		/*
297 		 * The 970 has strange rules about how to update HID registers.
298 		 * See Table 2-3, 970MP manual
299 		 *
300 		 * Note: HID4 and HID5 restored already in
301 		 * cpudep_ap_early_bootstrap()
302 		 */
303 
304 		__asm __volatile("mtasr %0; sync" :: "r"(0));
305 	#ifdef __powerpc64__
306 		__asm __volatile(" \
307 			sync; isync;					\
308 			mtspr	%1, %0;					\
309 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
310 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
311 			sync; isync"
312 		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
313 		__asm __volatile("sync; isync;	\
314 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
315 		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
316 	#else
317 		__asm __volatile(" \
318 			ld	%0,0(%2);				\
319 			sync; isync;					\
320 			mtspr	%1, %0;					\
321 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
322 			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
323 			sync; isync"
324 		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
325 		__asm __volatile("ld %0, 8(%2); sync; isync;	\
326 		    mtspr %1, %0; mtspr %1, %0; sync; isync"
327 		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
328 	#endif
329 
330 		powerpc_sync();
331 		break;
332 	case IBMCELLBE:
333 		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
334 		if (mfmsr() & PSL_HV) {
335 			mtspr(SPR_HID0, bsp_state[0]);
336 			mtspr(SPR_HID1, bsp_state[1]);
337 			mtspr(SPR_HID4, bsp_state[2]);
338 			mtspr(SPR_HID6, bsp_state[3]);
339 
340 			mtspr(SPR_CELL_TSCR, bsp_state[4]);
341 		}
342 		#endif
343 
344 		mtspr(SPR_CELL_TSRL, bsp_state[5]);
345 
346 		break;
347 	case MPC7400:
348 	case MPC7410:
349 	case MPC7447A:
350 	case MPC7448:
351 	case MPC7450:
352 	case MPC7455:
353 	case MPC7457:
354 		/* XXX: Program the CPU ID into PIR */
355 		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
356 
357 		powerpc_sync();
358 		isync();
359 
360 		mtspr(SPR_HID0, bsp_state[0]); isync();
361 		mtspr(SPR_HID1, bsp_state[1]); isync();
362 
363 		/* Now enable the L3 cache. */
364 		switch (vers) {
365 		case MPC7450:
366 		case MPC7455:
367 		case MPC7457:
368 			/* Only MPC745x CPUs have an L3 cache. */
369 			reg = mpc745x_l3_enable(bsp_state[3]);
370 		default:
371 			break;
372 		}
373 
374 		reg = mpc74xx_l2_enable(bsp_state[2]);
375 		reg = mpc74xx_l1d_enable();
376 		reg = mpc74xx_l1i_enable();
377 
378 		break;
379 	case IBMPOWER7:
380 	case IBMPOWER7PLUS:
381 	case IBMPOWER8:
382 	case IBMPOWER8E:
383 #ifdef __powerpc64__
384 		if (mfmsr() & PSL_HV)
385 			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_LPES);
386 #endif
387 		break;
388 	default:
389 #ifdef __powerpc64__
390 		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
391 			break;
392 #endif
393 		printf("WARNING: Unknown CPU type. Cache performace may be "
394 		    "suboptimal.\n");
395 		break;
396 	}
397 }
398 
399