/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * Boot vector and low-level startup code for MIPS Coherent Processing
 * System (CPS) secondary cores/VPEs.  Entered uncached from reset; brings
 * the CPU into the coherent domain, initialises caches and (on MT/VP
 * systems) sibling VPEs, then jumps to the per-VPE boot configuration's
 * entry PC.  Assembled with .set noreorder: instructions immediately
 * following branches below are executed in the branch delay slot.
 */

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>

/* Offsets of registers within the CM GCR & CPC register blocks */
#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

.extern mips_cm_base

/* No assembler reordering: delay slots below are filled explicitly */
.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

/*
 * Dump the named exception over the NS16550 UART before spinning.
 * TEXT() emits the string at local label 8, loaded into a0 via 8f.
 */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 *
	 * Each bgez tests the Config register's top (continuation) bit: if it
	 * is clear the next Config register is not present, so MT cannot be.
	 * The mfc0 of the next Config register sits in the branch delay slot.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 *
	 * Walks Config1..Config5 continuation bits as in has_mt, then checks
	 * the Config5.VP bit.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm

	/* Calculate an uncached address for the CM GCRs */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE	/* GCR base, physical >> 4 */
	PTR_SLL	$1, $1, 4		/* recover the physical address */
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1	/* map through the uncached segment */
	.set	pop
	.endm

.section .text.cps-vec
.balign 0x1000

LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/*
	 * Skip cache & coherence setup if we're already coherent.
	 * s7 is preserved across the calls below and later re-tested after
	 * mips_cps_get_bootcfg to skip core-level init too.
	 */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 (patched in by cps_smp_setup above) */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7			/* set then clear the K0 field... */
	xori	t0, 0x7			/* ...to zero it without a mask reg */
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9			/* a1 = this VPE's ID */
	jal	mips_cps_boot_vpes
	 move	a0, v0			/* a0 = core_boot_config (delay slot) */

	/* Off we go! v1 = this VPE's struct vpe_boot_config */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_entry)

/*
 * BEV exception vector stubs.  While Status.BEV is set exceptions vector
 * to fixed offsets from the base of this (page-aligned) section; each stub
 * optionally dumps diagnostics then spins.
 */
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)

/*
 * mips_cps_core_init() - core-level MT initialisation
 *
 * On MT-capable cores, place all TCs/VPEs other than VPE0/TC0 into a
 * halted, non-active state with a 1:1 TC:VPE binding, ready to be brought
 * up later by mips_cps_boot_vpes.  No-op if MT is unsupported or not
 * configured.  Clobbers t0, t1, ta1, ta3.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1			/* clear execution hazards from dmt/dvpe */
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1		/* ta3 = number of VPEs (PVPE + 1) */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core, starting from VPE 1 */
	li	ta1, 1

1:	/* Operate on the appropriate TC (TargTC = ta1) */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)	/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1		/* v0 = &mips_cps_core_bootcfg[core] */

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/*
	 * Calculate a mask for the VPE ID from EBase.CPUNum:
	 * mask = (1 << (31 - clz(nvpes))) - 1, i.e. enough low bits to
	 * cover all VPE numbers in this core.
	 */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum ($15 sel 1 = EBase) */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3		/* v1 = &vpe_config[vpe_id] */

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

/*
 * mips_cps_boot_vpes() - start sibling VPs/VPEs within this core
 *
 * a0 = pointer to this core's struct core_boot_config
 * a1 = this VPE's ID
 *
 * Starts every VP/VPE whose bit is set in the core's VPE mask and stops
 * (MIPSr6) or halts (MT ASE) this one if its bit is clear.  No-op on
 * cores without multithreading support.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)	/* ta2 = mask of VPEs to run */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)	/* ta3 = vpe_config array */

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC, mapped uncached */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff		/* strip the low control bits */
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1			/* clear execution hazard from dvpe */
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE; t8 keeps the full mask, ta2 is shifted */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC: set VPEControl.TargTC = ta1 */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC	/* set then clear field... */
	xori	t0, t0, VPECONTROL_TARGTC	/* ...to zero TargTC */
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted (i.e. already running) */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/*
	 * This VPE should be offline, halt the TC.  Execution stops at the
	 * mtc0 once the halt takes effect; the jr.hb self-loop just clears
	 * the hazard until then.
	 */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

/*
 * mips_cps_cache_init() - initialise the L1 I & D caches
 *
 * Reads the cache geometry from Config1 and invalidates every line of
 * both primary caches via Index Store Tag with zeroed tag registers.
 * Must run before entering the coherent domain.  Clobbers v0, t0-t3,
 * a0, a1.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: 0 = no I-cache, else 2 << IL bytes */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size: IS == 7 means 32 sets, else 32 << (IS+1) */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (ways) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total I-cache bytes */

	/* Index-invalidate every I-cache line */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size: 0 = no D-cache, else 2 << DL bytes */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size: DS == 7 means 32 sets, else 32 << (DS+1) */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (ways) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total D-cache bytes */

	/*
	 * Index-invalidate every D-cache line.  a1 is the last line (not
	 * one-past-the-end) because the increment is in the delay slot of
	 * the bne, so the final line is touched after the compare.
	 */
	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)			/* $1 = this CPU's number */
	sll	$1, $1, LONGLOG			/* scale to pointer size */
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)			/* $1 = per-CPU offset */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1		/* dest = this CPU's state */
	.set	pop
	.endm

/*
 * mips_cps_pm_save() - save CPU state prior to a CPS power-down
 *
 * Saves registers and static CPU state, then jumps to the continuation
 * address in v0 rather than returning to the caller.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

/*
 * mips_cps_pm_restore() - restore CPU state after a CPS power-up
 *
 * Restores the state saved by mips_cps_pm_save and returns to the
 * original caller via RESUME_RESTORE_REGS_RETURN.
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */