/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

void *ap_pcpu;

static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);

void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Set HIOR to 0 */
		__asm __volatile("mtspr 311,%0" :: "r"(0));
		powerpc_sync();

		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			isync();
			/*
			 * Direct interrupts to SRR instead of HSRR and
			 * reset LPCR otherwise
			 */
			mtspr(SPR_LPID, 0);
			isync();

			mtspr(SPR_LPCR, lpcr);
			isync();

			/*
			 * Nuke FSCR, to be managed on a per-process basis
			 * later.
			 */
			mtspr(SPR_FSCR, 0);
		}
#endif
		break;
	}

	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}
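
/*
 * Finish an AP's bootstrap in C.  cpudep_ap_early_bootstrap() above has
 * already stashed the AP's pcpu pointer in SPRG0; here the AP runs with
 * external interrupts masked, points curthread at its idle thread (kept
 * in r13 on powerpc64 and r2 on 32-bit kernels), and returns the idle
 * thread's stack pointer for the startup code to switch onto.
 */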
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = psl_kernset & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;
	schedinit_ap();

	return (sp);
}

static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t vers;

	/*
	 * The 7400/7410 report a global invalidate in progress through
	 * L2CR[L2IP]; later parts keep L2CR[L2I] set until it completes.
	 */
	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	mtspr(SPR_L2CR, ccr | L2CR_L2I);	/* Start a global invalidate. */
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);	/* Enable with the BSP's settings. */
	powerpc_sync();

	return (l2cr_config);
}

static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;	/* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	mtspr(SPR_L3CR, ccr | L3CR_L3I);	/* Global invalidate; L3I clears when done. */
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return (ccr);
}

static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
	hid |= HID0_DCE;
	powerpc_sync();
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}

static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}
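
/*
 * Save the BSP's configuration registers at boot (via the SYSINIT above)
 * so that the APs can be brought up to match.  On 32-bit kernels running
 * on 970-class CPUs the HID registers are 64 bits wide, so each register
 * is stored as a pair of 32-bit bsp_state[] slots, high word first;
 * bsp_state's __aligned(8) lets cpudep_ap_early_bootstrap() and
 * cpudep_ap_setup() reload each pair with a single 64-bit ld.
 */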
static void
cpudep_save_config(void *dummy)
{
	uint16_t vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
#else
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* Fallthrough */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}
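
/*
 * Bring an AP's configuration in line with the state saved from the BSP.
 * On the MPC74xx parts the caches come up outermost-first: L3 (745x
 * only), then L2, then the L1s, each invalidated before being enabled.
 */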
void
cpudep_ap_setup(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif
	uint16_t vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/*
		 * The 970 has strange rules about how to update HID registers.
		 * See Table 2-3, 970MP manual.
		 *
		 * Note: HID4 and HID5 restored already in
		 * cpudep_ap_early_bootstrap()
		 */

		__asm __volatile("mtasr %0; sync" :: "r"(0));
#ifdef __powerpc64__
		__asm __volatile(" \
			sync; isync;	\
			mtspr	%1, %0;	\
			mfspr	%0, %1; mfspr	%0, %1; mfspr	%0, %1;	\
			mfspr	%0, %1; mfspr	%0, %1; mfspr	%0, %1;	\
			sync; isync"
		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
		__asm __volatile("sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
#else
		__asm __volatile(" \
			ld	%0,0(%2);	\
			sync; isync;	\
			mtspr	%1, %0;	\
			mfspr	%0, %1; mfspr	%0, %1; mfspr	%0, %1;	\
			mfspr	%0, %1; mfspr	%0, %1; mfspr	%0, %1;	\
			sync; isync"
		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
		__asm __volatile("ld %0, 8(%2); sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
#endif

		powerpc_sync();
		break;
	case IBMCELLBE:
#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_HID0, bsp_state[0]);
			mtspr(SPR_HID1, bsp_state[1]);
			mtspr(SPR_HID4, bsp_state[2]);
			mtspr(SPR_HID6, bsp_state[3]);

			mtspr(SPR_CELL_TSCR, bsp_state[4]);
		}
#endif

		mtspr(SPR_CELL_TSRL, bsp_state[5]);

		break;
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* XXX: Program the CPU ID into PIR */
		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));

		powerpc_sync();
		isync();

		mtspr(SPR_HID0, bsp_state[0]); isync();
		mtspr(SPR_HID1, bsp_state[1]); isync();

		/* Now enable the L3 cache. */
		switch (vers) {
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/* Only MPC745x CPUs have an L3 cache. */
			mpc745x_l3_enable(bsp_state[3]);
		default:
			break;
		}

		mpc74xx_l2_enable(bsp_state[2]);
		mpc74xx_l1d_enable();
		mpc74xx_l1i_enable();

		break;
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
			    LPCR_PECE_WAKESET);
			isync();
		}
#endif
		break;
	default:
#ifdef __powerpc64__
		if (!(mfmsr() & PSL_HV))	/* Rely on HV to have set things up */
			break;
#endif
		printf("WARNING: Unknown CPU type. Cache performance may be "
		    "suboptimal.\n");
		break;
	}
}