/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

void *ap_pcpu;

static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);

void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			isync();
			/*
			 * Direct interrupts to SRR instead of HSRR and
			 * reset LPCR otherwise
			 */
			mtspr(SPR_LPID, 0);
			isync();

			mtspr(SPR_LPCR, lpcr);
			isync();

			/*
			 * Nuke FSCR, to be managed on a per-process basis
			 * later.
			 */
			mtspr(SPR_FSCR, 0);
		}
#endif
		break;
	}

	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}

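/*
 * Late AP bootstrap: install the idle thread as curthread, mirror it into
 * the register the kernel reserves for curthread (r13 on 64-bit, r2 on
 * 32-bit), and return the idle thread's stack pointer so the caller can
 * switch onto it.
 */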
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = psl_kernset & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	return (sp);
}

static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t vers;

	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}

static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;	/* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return (ccr);
}

static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
	hid |= HID0_DCE;
	powerpc_sync();
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}

static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}

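/*
 * Runs once on the BSP via SYSINIT (SI_SUB_CPU): snapshot the HID and
 * cache-control registers into bsp_state[] so cpudep_ap_setup() can bring
 * each AP up with an identical configuration. On 32-bit kernels the 64-bit
 * 970 HID values are stored as two 32-bit halves per register.
 */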
static void
cpudep_save_config(void *dummy)
{
	uint16_t vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
#else
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]), "=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]), "=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]), "=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]), "=r" (bsp_state[7]) : "K" (SPR_HID5));
#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}

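/*
 * Per-AP setup: synchronize the timebase with the BSP, then replay the
 * configuration captured by cpudep_save_config() using the update
 * sequence each CPU family requires.
 */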
void
cpudep_ap_setup(void)
{
	register_t reg;
	uint16_t vers;

	vers = mfpvr() >> 16;

	/* The following is needed for restoring from sleep. */
	platform_smp_timebase_sync(0, 1);

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Set HIOR to 0 */
		__asm __volatile("mtspr 311,%0" :: "r"(0));
		powerpc_sync();

		/*
		 * The 970 has strange rules about how to update HID registers.
		 * See Table 2-3, 970MP manual
		 *
		 * Note: HID4 and HID5 restored already in
		 * cpudep_ap_early_bootstrap()
		 */

		__asm __volatile("mtasr %0; sync" :: "r"(0));
#ifdef __powerpc64__
		__asm __volatile("					\
		    sync; isync;					\
		    mtspr %1, %0;					\
		    mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;		\
		    mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;		\
		    sync; isync"
		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
		__asm __volatile("sync; isync;				\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
#else
		__asm __volatile("					\
		    ld %0,0(%2);					\
		    sync; isync;					\
		    mtspr %1, %0;					\
		    mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;		\
		    mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;		\
		    sync; isync"
		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
		__asm __volatile("ld %0, 8(%2); sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
#endif

		powerpc_sync();
		break;
	case IBMCELLBE:
#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_HID0, bsp_state[0]);
			mtspr(SPR_HID1, bsp_state[1]);
			mtspr(SPR_HID4, bsp_state[2]);
			mtspr(SPR_HID6, bsp_state[3]);

			mtspr(SPR_CELL_TSCR, bsp_state[4]);
		}
#endif

		mtspr(SPR_CELL_TSRL, bsp_state[5]);

		break;
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* XXX: Program the CPU ID into PIR */
		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));

		powerpc_sync();
		isync();

		mtspr(SPR_HID0, bsp_state[0]); isync();
		mtspr(SPR_HID1, bsp_state[1]); isync();

		/* Now enable the L3 cache. */
		switch (vers) {
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/* Only MPC745x CPUs have an L3 cache. */
			reg = mpc745x_l3_enable(bsp_state[3]);
		default:
			break;
		}

		reg = mpc74xx_l2_enable(bsp_state[2]);
		reg = mpc74xx_l1d_enable();
		reg = mpc74xx_l1i_enable();

		break;
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
			    LPCR_PECE_WAKESET);
			isync();
		}
#endif
		break;
	default:
#ifdef __powerpc64__
		if (!(mfmsr() & PSL_HV))	/* Rely on HV to have set things up */
			break;
#endif
		printf("WARNING: Unknown CPU type. Cache performance may be "
		    "suboptimal.\n");
		break;
	}
}