/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

_GLOBAL(__setup_cpu_603)
        b       setup_common_caches
_GLOBAL(__setup_cpu_604)
        mflr    r4
        bl      setup_common_caches
        bl      setup_604_hid0
        mtlr    r4
        blr
_GLOBAL(__setup_cpu_750)
        mflr    r4
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        mtlr    r4
        blr
_GLOBAL(__setup_cpu_750cx)
        mflr    r4
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        bl      setup_750cx
        mtlr    r4
        blr
_GLOBAL(__setup_cpu_750fx)
        mflr    r4
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        bl      setup_750fx
        mtlr    r4
        blr
_GLOBAL(__setup_cpu_7400)
        mflr    r4
        bl      __init_fpu_registers
        bl      setup_7400_workarounds
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        mtlr    r4
        blr
_GLOBAL(__setup_cpu_7410)
        mflr    r4
        bl      __init_fpu_registers
        bl      setup_7410_workarounds
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        li      r3,0
        mtspr   SPRN_L2CR2,r3
        mtlr    r4
        blr
_GLOBAL(__setup_cpu_745x)
        mflr    r4
        bl      setup_common_caches
        bl      setup_745x_specifics
        mtlr    r4
        blr

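/* Register conventions for the entry points above (inferred from the
 * code, not spelled out in the original): r4 saves LR across the "bl"
 * helper calls, since bl clobbers LR. Judging by the helpers, r3
 * apparently carries a relocation offset (it is added to the address
 * of empty_zero_page in __init_fpu_registers) and r5 apparently points
 * to the current cpu_spec entry (it is the base register for the
 * CPU_SPEC_FEATURES accesses in setup_750cx and setup_745x_specifics),
 * so the helpers must preserve r4 and r5.
 */
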
/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
        mfspr   r11,SPRN_HID0
        andi.   r0,r11,HID0_DCE
        ori     r11,r11,HID0_ICE|HID0_DCE
        ori     r8,r11,HID0_ICFI
        bne     1f                      /* don't invalidate the D-cache */
        ori     r8,r8,HID0_DCI          /* unless it wasn't enabled */
1:      sync
        mtspr   SPRN_HID0,r8            /* enable and invalidate caches */
        sync
        mtspr   SPRN_HID0,r11           /* enable caches */
        sync
        isync
        blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
setup_604_hid0:
        mfspr   r11,SPRN_HID0
        ori     r11,r11,HID0_SIED|HID0_BHTE
        ori     r8,r11,HID0_BTCD
        sync
        mtspr   SPRN_HID0,r8    /* flush branch target address cache */
        sync                    /* on 604e/604r */
        mtspr   SPRN_HID0,r11
        sync
        isync
        blr

/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * errata we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5.
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appears that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitly clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
setup_7400_workarounds:
        mfpvr   r3
        rlwinm  r3,r3,0,20,31
        cmpwi   0,r3,0x0207
        ble     1f
        blr
setup_7410_workarounds:
        mfpvr   r3
        rlwinm  r3,r3,0,20,31
        cmpwi   0,r3,0x0100
        bnelr
1:
        mfspr   r11,SPRN_MSSSR0
        /* Errata #3: Set L1OPQ_SIZE to 0x10 */
        rlwinm  r11,r11,0,9,6
        oris    r11,r11,0x0100
        /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
        oris    r11,r11,0x0002
        /* Errata #5: Set DRLT_SIZE to 0x01 */
        rlwinm  r11,r11,0,5,2
        oris    r11,r11,0x0800
        sync
        mtspr   SPRN_MSSSR0,r11
        sync
        isync
        blr

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
setup_750_7400_hid0:
        mfspr   r11,SPRN_HID0
        ori     r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
        oris    r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
        xori    r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
        xoris   r11,r11,HID0_DPM@h      /* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
        li      r3,HID0_SPD
        andc    r11,r11,r3              /* clear SPD: enable speculative */
        li      r3,0
        mtspr   SPRN_ICTC,r3            /* Instruction Cache Throttling off */
        isync
        mtspr   SPRN_HID0,r11
        sync
        isync
        blr

/* 750cx specific
 * Looks like we have to disable the NAP feature for some PLL
 * settings... (waiting for confirmation)
 */
setup_750cx:
        mfspr   r10, SPRN_HID1
        rlwinm  r10,r10,4,28,31
        cmpwi   cr0,r10,7
        cmpwi   cr1,r10,9
        cmpwi   cr2,r10,11
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr2+eq
        bnelr
        lwz     r6,CPU_SPEC_FEATURES(r5)
        li      r7,CPU_FTR_CAN_NAP
        andc    r6,r6,r7
        stw     r6,CPU_SPEC_FEATURES(r5)
        blr

/* 750fx specific
 */
setup_750fx:
        blr

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
setup_745x_specifics:
        /* We check for the presence of an L3 cache setup by
         * the firmware. If any, we disable NAP capability as
         * it's known to be bogus on rev 2.1 and earlier
         */
BEGIN_FTR_SECTION
        mfspr   r11,SPRN_L3CR
        andis.  r11,r11,L3CR_L3E@h
        beq     1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
        lwz     r6,CPU_SPEC_FEATURES(r5)
        andi.   r0,r6,CPU_FTR_L3_DISABLE_NAP
        beq     1f
        li      r7,CPU_FTR_CAN_NAP
        andc    r6,r6,r7
        stw     r6,CPU_SPEC_FEATURES(r5)
1:
        mfspr   r11,SPRN_HID0

        /* All of the bits we have to set.....
         */
        ori     r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
        ori     r11,r11,HID0_LRSTK | HID0_BTIC
        oris    r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
        xori    r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
        xoris   r11,r11,HID0_DPM@h      /* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)

        /* All of the bits we have to clear....
         */
        li      r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
        andc    r11,r11,r3              /* clear SPD: enable speculative */
        li      r3,0

        mtspr   SPRN_ICTC,r3            /* Instruction Cache Throttling off */
        isync
        mtspr   SPRN_HID0,r11
        sync
        isync

        /* Enable L2 HW prefetch, if L2 is enabled
         */
        mfspr   r3,SPRN_L2CR
        andis.  r3,r3,L2CR_L2E@h
        beqlr
        mfspr   r3,SPRN_MSSCR0
        ori     r3,r3,3
        sync
        mtspr   SPRN_MSSCR0,r3
        sync
        isync
        blr

/*
 * Initialize the FPU registers. This is needed to work around an
 * erratum in some 750 CPUs where using a not yet initialized FPU
 * register after power on reset may hang the CPU
 */
_GLOBAL(__init_fpu_registers)
        mfmsr   r10
        ori     r11,r10,MSR_FP
        mtmsr   r11
        isync
        addis   r9,r3,empty_zero_page@ha
        addi    r9,r9,empty_zero_page@l
        REST_32FPRS(0,r9)
        sync
        mtmsr   r10
        isync
        blr


/* Definitions for the table used to save CPU states */
#define CS_HID0         0
#define CS_HID1         4
#define CS_HID2         8
#define CS_MSSCR0       12
#define CS_MSSSR0       16
#define CS_ICTRL        20
#define CS_LDSTCR       24
#define CS_LDSTDB       28
#define CS_SIZE         32

        .data
        .balign L1_CACHE_BYTES
cpu_state_storage:
        .space  CS_SIZE
        .balign L1_CACHE_BYTES,0
        .text

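/* For reference, the CS_* offsets above describe a layout equivalent
 * to this C struct (illustrative only, nothing in the code relies on
 * such a declaration existing):
 *
 *      struct cpu_state {
 *              u32 hid0;       -- CS_HID0,   offset  0
 *              u32 hid1;       -- CS_HID1,   offset  4
 *              u32 hid2;       -- CS_HID2,   offset  8
 *              u32 msscr0;     -- CS_MSSCR0, offset 12
 *              u32 msssr0;     -- CS_MSSSR0, offset 16
 *              u32 ictrl;      -- CS_ICTRL,  offset 20
 *              u32 ldstcr;     -- CS_LDSTCR, offset 24
 *              u32 ldstdb;     -- CS_LDSTDB, offset 28
 *      };                      -- CS_SIZE = sizeof = 32
 */
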
/* Called in normal context to backup CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
        /* Some CR fields are volatile, we save them all */
        mfcr    r7

        /* Get storage ptr */
        lis     r5,cpu_state_storage@h
        ori     r5,r5,cpu_state_storage@l

        /* Save HID0 (common to all CONFIG_6xx cpus) */
        mfspr   r3,SPRN_HID0
        stw     r3,CS_HID0(r5)

        /* Now deal with CPU type dependent registers */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmplwi  cr0,r3,0x8000   /* 7450 */
        cmplwi  cr1,r3,0x000c   /* 7400 */
        cmplwi  cr2,r3,0x800c   /* 7410 */
        cmplwi  cr3,r3,0x8001   /* 7455 */
        cmplwi  cr4,r3,0x8002   /* 7457 */
        cmplwi  cr5,r3,0x8003   /* 7447A */
        cmplwi  cr6,r3,0x7000   /* 750FX */
        cmplwi  cr7,r3,0x8004   /* 7448 */
        /* cr1 is 7400 || 7410 */
        cror    4*cr1+eq,4*cr1+eq,4*cr2+eq
        /* cr0 is 74xx */
        cror    4*cr0+eq,4*cr0+eq,4*cr3+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr4+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr5+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr7+eq
        bne     1f
        /* Backup 74xx specific regs */
        mfspr   r4,SPRN_MSSCR0
        stw     r4,CS_MSSCR0(r5)
        mfspr   r4,SPRN_MSSSR0
        stw     r4,CS_MSSSR0(r5)
        beq     cr1,1f
        /* Backup 745x specific registers */
        mfspr   r4,SPRN_HID1
        stw     r4,CS_HID1(r5)
        mfspr   r4,SPRN_ICTRL
        stw     r4,CS_ICTRL(r5)
        mfspr   r4,SPRN_LDSTCR
        stw     r4,CS_LDSTCR(r5)
        mfspr   r4,SPRN_LDSTDB
        stw     r4,CS_LDSTDB(r5)
1:
        bne     cr6,1f
        /* Backup 750FX specific registers */
        mfspr   r4,SPRN_HID1
        stw     r4,CS_HID1(r5)
        /* If rev 2.x, backup HID2 */
        mfspr   r3,SPRN_PVR
        andi.   r3,r3,0xff00
        cmpwi   cr0,r3,0x0200
        bne     1f
        mfspr   r4,SPRN_HID2
        stw     r4,CS_HID2(r5)
1:
        mtcr    r7
        blr

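/* The PVR classification above (and repeated in __restore_cpu_setup
 * below) works roughly like this C sketch, where "ver" is the upper
 * half of the PVR:
 *
 *      u32 ver = pvr >> 16;
 *      int is_7400_7410 = (ver == 0x000c || ver == 0x800c);
 *      int is_74xx = is_7400_7410 || ver == 0x8000 || ver == 0x8001 ||
 *                    ver == 0x8002 || ver == 0x8003 || ver == 0x8004;
 *      int is_750fx = (ver == 0x7000);
 *
 * The cror instructions fold the individual cmplwi results, so that
 * after them cr1[eq] means "7400 or 7410" and cr0[eq] means "any
 * 74xx", while cr6[eq] still means "750FX".
 */
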
/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache settings
 */
_GLOBAL(__restore_cpu_setup)
        /* Some CR fields are volatile, we save them all */
        mfcr    r7

        /* Get storage ptr */
        lis     r5,(cpu_state_storage-KERNELBASE)@h
        ori     r5,r5,cpu_state_storage@l

        /* Restore HID0 */
        lwz     r3,CS_HID0(r5)
        sync
        isync
        mtspr   SPRN_HID0,r3
        sync
        isync

        /* Now deal with CPU type dependent registers */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmplwi  cr0,r3,0x8000   /* 7450 */
        cmplwi  cr1,r3,0x000c   /* 7400 */
        cmplwi  cr2,r3,0x800c   /* 7410 */
        cmplwi  cr3,r3,0x8001   /* 7455 */
        cmplwi  cr4,r3,0x8002   /* 7457 */
        cmplwi  cr5,r3,0x8003   /* 7447A */
        cmplwi  cr6,r3,0x7000   /* 750FX */
        cmplwi  cr7,r3,0x8004   /* 7448 */
        /* cr1 is 7400 || 7410 */
        cror    4*cr1+eq,4*cr1+eq,4*cr2+eq
        /* cr0 is 74xx */
        cror    4*cr0+eq,4*cr0+eq,4*cr3+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr4+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr5+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr7+eq
        bne     2f
        /* Restore 74xx specific regs */
        lwz     r4,CS_MSSCR0(r5)
        sync
        mtspr   SPRN_MSSCR0,r4
        sync
        isync
        lwz     r4,CS_MSSSR0(r5)
        sync
        mtspr   SPRN_MSSSR0,r4
        sync
        isync
        bne     cr2,1f
        /* Clear 7410 L2CR2 */
        li      r4,0
        mtspr   SPRN_L2CR2,r4
1:      beq     cr1,2f
        /* Restore 745x specific registers */
        lwz     r4,CS_HID1(r5)
        sync
        mtspr   SPRN_HID1,r4
        isync
        sync
        lwz     r4,CS_ICTRL(r5)
        sync
        mtspr   SPRN_ICTRL,r4
        isync
        sync
        lwz     r4,CS_LDSTCR(r5)
        sync
        mtspr   SPRN_LDSTCR,r4
        isync
        sync
        lwz     r4,CS_LDSTDB(r5)
        sync
        mtspr   SPRN_LDSTDB,r4
        isync
        sync
2:      bne     cr6,1f
        /* Restore 750FX specific registers,
         * that is restore HID2 on rev 2.x and PLL config & switch
         * to PLL 0 on all
         */
        /* If rev 2.x, restore HID2 with low voltage bit cleared */
        mfspr   r3,SPRN_PVR
        andi.   r3,r3,0xff00
        cmpwi   cr0,r3,0x0200
        bne     4f
        lwz     r4,CS_HID2(r5)
        rlwinm  r4,r4,0,19,17
        mtspr   SPRN_HID2,r4
        sync
4:
        lwz     r4,CS_HID1(r5)
        rlwinm  r5,r4,0,16,14
        mtspr   SPRN_HID1,r5
        /* Wait for PLL to stabilize */
        mftbl   r5
3:      mftbl   r6
        sub     r6,r6,r5
        cmplwi  cr0,r6,10000
        ble     3b
        /* Setup final PLL */
        mtspr   SPRN_HID1,r4
1:
        mtcr    r7
        blr

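/* Note on the 750FX PLL switch at the end of __restore_cpu_setup:
 * "rlwinm r5,r4,0,16,14" keeps every bit of the saved HID1 except
 * bit 15, which per the comments above forces the CPU onto PLL 0
 * first; the loop then busy-waits for more than 10000 timebase ticks
 * before the full saved HID1 value (the final PLL selection) is
 * written back. In rough C (illustrative, assuming an mftbl() that
 * reads the timebase lower half):
 *
 *      u32 start = mftbl();
 *      while (mftbl() - start <= 10000)
 *              ;
 */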