/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

void *ap_pcpu;

static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);

void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Set HIOR to 0 */
		__asm __volatile("mtspr 311,%0" :: "r"(0));
		powerpc_sync();

		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			isync();
			/*
			 * Direct interrupts to SRR instead of HSRR and
			 * reset LPCR otherwise
			 */
			mtspr(SPR_LPID, 0);
			isync();

			mtspr(SPR_LPCR, lpcr);
			isync();

			/*
			 * Nuke FSCR, to be managed on a per-process basis
			 * later.
			 */
			mtspr(SPR_FSCR, 0);
		}
#endif
		break;
	}

	/* Install this AP's per-CPU data pointer in SPRG0. */
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}
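
/*
 * Finish bootstrapping the AP on the kernel side: install a kernel MSR
 * with external interrupts disabled, make this CPU's idle thread the
 * current thread, and return the idle thread's stack pointer so the
 * caller can switch onto it.
 */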
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = psl_kernset & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;
	schedinit_ap();

	return (sp);
}

static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t vers;

	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		/* The 7400/7410 report invalidate progress in L2IP. */
		bit = L2CR_L2IP;
		break;
	default:
		/* Later 74xx parts use L2I itself as the busy bit. */
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}

static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;	/* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return (ccr);
}

static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
	hid |= HID0_DCE;
	powerpc_sync();
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}

static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}
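
/*
 * Snapshot the BSP's configuration registers once at SYSINIT time; the
 * saved values are replayed on the APs in cpudep_ap_early_bootstrap()
 * and cpudep_ap_setup().  On 32-bit kernels the 970's 64-bit HID
 * registers are stored as high/low word pairs in bsp_state[].
 */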
static void
cpudep_save_config(void *dummy)
{
	uint16_t vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
#else
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]), "=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]), "=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]), "=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]), "=r" (bsp_state[7]) : "K" (SPR_HID5));
#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
#ifdef NOTYET	/* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}
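
/*
 * Per-AP setup: replay the register state captured by
 * cpudep_save_config() for the detected CPU family and, on MPC74xx
 * parts, bring the AP's caches online.
 */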
void
cpudep_ap_setup(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif
	uint16_t vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/*
		 * The 970 has strange rules about how to update HID registers.
		 * See Table 2-3, 970MP manual
		 *
		 * Note: HID4 and HID5 restored already in
		 * cpudep_ap_early_bootstrap()
		 */

		__asm __volatile("mtasr %0; sync" :: "r"(0));
#ifdef __powerpc64__
		__asm __volatile(" \
			sync; isync;					\
			mtspr %1, %0;					\
			mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;	\
			mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;	\
			sync; isync"
		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
		__asm __volatile("sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
#else
		__asm __volatile(" \
			ld %0,0(%2);					\
			sync; isync;					\
			mtspr %1, %0;					\
			mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;	\
			mfspr %0, %1; mfspr %0, %1; mfspr %0, %1;	\
			sync; isync"
		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
		__asm __volatile("ld %0, 8(%2); sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
#endif

		powerpc_sync();
		break;
	case IBMCELLBE:
#ifdef NOTYET	/* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_HID0, bsp_state[0]);
			mtspr(SPR_HID1, bsp_state[1]);
			mtspr(SPR_HID4, bsp_state[2]);
			mtspr(SPR_HID6, bsp_state[3]);

			mtspr(SPR_CELL_TSCR, bsp_state[4]);
		}
#endif

		mtspr(SPR_CELL_TSRL, bsp_state[5]);

		break;
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* XXX: Program the CPU ID into PIR */
		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));

		powerpc_sync();
		isync();

		mtspr(SPR_HID0, bsp_state[0]); isync();
		mtspr(SPR_HID1, bsp_state[1]); isync();

		/* Now enable the L3 cache. */
		switch (vers) {
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/* Only MPC745x CPUs have an L3 cache. */
			mpc745x_l3_enable(bsp_state[3]);
		default:
			break;
		}

		mpc74xx_l2_enable(bsp_state[2]);
		mpc74xx_l1d_enable();
		mpc74xx_l1i_enable();

		break;
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
			    LPCR_PECE_WAKESET);
			isync();
		}
#endif
		break;
	default:
#ifdef __powerpc64__
		if (!(mfmsr() & PSL_HV))	/* Rely on HV to have set things up */
			break;
#endif
		printf("WARNING: Unknown CPU type. Cache performance may be "
		    "suboptimal.\n");
		break;
	}
}