/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Transactional memory support routines to reclaim and recheckpoint
 * transactional process state.
 *
 * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
 */

#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/bug.h>
#include <asm/export.h>

#ifdef CONFIG_VSX
/* See fpu.S, this is borrowed from there */
/* Save/restore FPRs as VSRs when the CPU has VSX, plain FPRs otherwise. */
#define __SAVE_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	SAVE_32FPRS(n,base);			\
	b	3f;				\
2:	SAVE_32VSRS(n,c,base);			\
3:
#define __REST_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	REST_32FPRS(n,base);			\
	b	3f;				\
2:	REST_32VSRS(n,c,base);			\
3:
#else
#define __SAVE_32FPRS_VSRS(n,c,base)	SAVE_32FPRS(n, base)
#define __REST_32FPRS_VSRS(n,c,base)	REST_32FPRS(n, base)
#endif
#define SAVE_32FPRS_VSRS(n,c,base) \
	__SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
#define REST_32FPRS_VSRS(n,c,base) \
	__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)

/* Stack frame offsets for local variables. */
#define TM_FRAME_L0	TM_FRAME_SIZE-16
#define TM_FRAME_L1	TM_FRAME_SIZE-8


/* In order to access the TM SPRs, TM must be enabled. So, do so: */
/* Sets MSR[TM] if it is not already set; clobbers r0, r3, r4. */
_GLOBAL(tm_enable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	and.	r0, r4, r3
	bne	1f
	or	r4, r4, r3
	mtmsrd	r4
1:	blr
EXPORT_SYMBOL_GPL(tm_enable);

/* Clear MSR[TM]; clobbers r3, r4. */
_GLOBAL(tm_disable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	andc	r4, r4, r3
	mtmsrd	r4
	blr
EXPORT_SYMBOL_GPL(tm_disable);

/* Save TFHAR/TEXASR/TFIAR into the thread_struct pointed to by r3. */
_GLOBAL(tm_save_sprs)
	mfspr	r0, SPRN_TFHAR
	std	r0, THREAD_TM_TFHAR(r3)
	mfspr	r0, SPRN_TEXASR
	std	r0, THREAD_TM_TEXASR(r3)
	mfspr	r0, SPRN_TFIAR
	std	r0, THREAD_TM_TFIAR(r3)
	blr

/* Restore TFHAR/TEXASR/TFIAR from the thread_struct pointed to by r3. */
_GLOBAL(tm_restore_sprs)
	ld	r0, THREAD_TM_TFHAR(r3)
	mtspr	SPRN_TFHAR, r0
	ld	r0, THREAD_TM_TEXASR(r3)
	mtspr	SPRN_TEXASR, r0
	ld	r0, THREAD_TM_TFIAR(r3)
	mtspr	SPRN_TFIAR, r0
	blr

	/* Passed an 8-bit failure cause as first argument. */
_GLOBAL(tm_abort)
	TABORT(R3)
	blr
EXPORT_SYMBOL_GPL(tm_abort);

/* void tm_reclaim(struct thread_struct *thread,
 *		   uint8_t cause)
 *
 *	- Performs a full reclaim.  This destroys outstanding
 *	  transactions and updates thread->regs.tm_ckpt_* with the
 *	  original checkpointed state.  Note that thread->regs is
 *	  unchanged.
 *
 * Purpose is to both abort transactions of, and preserve the state of,
 * transactions at a context switch.  We preserve/restore both sets of process
 * state to restore them when the thread's scheduled again.  We continue in
 * userland as though nothing happened, but when the transaction is resumed
 * they will abort back to the checkpointed state we save out here.
 *
 * Call with IRQs off, stacks get all out of sync for some periods in here!
 */
_GLOBAL(tm_reclaim)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */

	std	r3, STK_PARAM(R3)(r1)
	SAVE_NVGPRS(r1)

	/* We need to setup MSR for VSX register save instructions. */
	mfmsr	r14
	mr	r15, r14
	ori	r15, r15, MSR_FP
	li	r16, 0
	ori	r16, r16, MSR_EE	/* IRQs hard off */
	andc	r15, r15, r16
	oris	r15, r15, MSR_VEC@h
#ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
	oris	r15,r15, MSR_VSX@h
	END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r15
	std	r14, TM_FRAME_L0(r1)

	/* Do sanity check on MSR to make sure we are suspended */
	li	r7, (MSR_TS_S)@higher
	srdi	r6, r14, 32
	and	r6, r6, r7
1:	tdeqi	r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/* Stash the stack pointer away for use after reclaim */
	std	r1, PACAR1(r13)

	/* Clear MSR RI since we are about to change r1, EE is already off. */
	li	r5, 0
	mtmsrd	r5, 1

	/*
	 * BE CAREFUL HERE:
	 * At this point we can't take an SLB miss since we have MSR_RI
	 * off. Load only to/from the stack/paca which are in SLB bolted regions
	 * until we turn MSR RI back on.
	 *
	 * The moment we treclaim, ALL of our GPRs will switch
	 * to user register state.  (FPRs, CCR etc. also!)
	 * Use an sprg and a tm_scratch in the PACA to shuffle.
	 */
	TRECLAIM(R4)				/* Cause in r4 */

	/* ******************** GPRs ******************** */
	/* Stash the checkpointed r13 away in the scratch SPR and get the real
	 * paca
	 */
	SET_SCRATCH0(r13)
	GET_PACA(r13)

	/* Stash the checkpointed r1 away in paca tm_scratch and get the real
	 * stack pointer back
	 */
	std	r1, PACATMSCRATCH(r13)
	ld	r1, PACAR1(r13)

	/* Store the PPR in r11 and reset to decent value */
	std	r11, GPR11(r1)			/* Temporary stash */

	/* Reset MSR RI so we can take SLB faults again */
	li	r11, MSR_RI
	mtmsrd	r11, 1

	mfspr	r11, SPRN_PPR
	HMT_MEDIUM

	/* Now get some more GPRS free */
	std	r7, GPR7(r1)			/* Temporary stash */
	std	r12, GPR12(r1)			/* ''   ''   ''  */
	ld	r12, STK_PARAM(R3)(r1)		/* Param 0, thread_struct * */

	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */

	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */

	/* Make r7 look like an exception frame so that we
	 * can use the neat GPRx(n) macros.  r7 is NOT a pt_regs ptr!
	 */
	subi	r7, r7, STACK_FRAME_OVERHEAD

	/* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
	SAVE_GPR(0, r7)				/* user r0 */
	SAVE_GPR(2, r7)				/* user r2 */
	SAVE_4GPRS(3, r7)			/* user r3-r6 */
	SAVE_GPR(8, r7)				/* user r8 */
	SAVE_GPR(9, r7)				/* user r9 */
	SAVE_GPR(10, r7)			/* user r10 */
	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
	ld	r4, GPR7(r1)			/* user r7 */
	ld	r5, GPR11(r1)			/* user r11 */
	ld	r6, GPR12(r1)			/* user r12 */
	GET_SCRATCH0(8)				/* user r13 */
	std	r3, GPR1(r7)
	std	r4, GPR7(r7)
	std	r5, GPR11(r7)
	std	r6, GPR12(r7)
	std	r8, GPR13(r7)

	SAVE_NVGPRS(r7)				/* user r14-r31 */

	/* ******************** NIP ******************** */
	mfspr	r3, SPRN_TFHAR
	std	r3, _NIP(r7)			/* Returns to failhandler */
	/* The checkpointed NIP is ignored when rescheduling/rechkpting,
	 * but is used in signal return to 'wind back' to the abort handler.
	 */

	/* ***************** CTR, LR, CR, XER ********** */
	mfctr	r3
	mflr	r4
	mfcr	r5
	mfxer	r6

	std	r3, _CTR(r7)
	std	r4, _LINK(r7)
	std	r5, _CCR(r7)
	std	r6, _XER(r7)


	/* ******************** TAR, DSCR ********** */
	mfspr	r3, SPRN_TAR
	mfspr	r4, SPRN_DSCR

	std	r3, THREAD_TM_TAR(r12)
	std	r4, THREAD_TM_DSCR(r12)

	/* MSR and flags:  We don't change CRs, and we don't need to alter
	 * MSR.
	 */


	/* ******************** FPR/VR/VSRs ************
	 * After reclaiming, capture the checkpointed FPRs/VRs.
	 *
	 * We enabled VEC/FP/VSX in the msr above, so we can execute these
	 * instructions!
	 */
	mr	r3, r12

	/* Altivec (VEC/VMX/VR)*/
	addi	r7, r3, THREAD_CKVRSTATE
	SAVE_32VRS(0, r6, r7)	/* r6 scratch, r7 transact vr state */
	mfvscr	v0
	li	r6, VRSTATE_VSCR
	stvx	v0, r7, r6

	/* VRSAVE */
	mfspr	r0, SPRN_VRSAVE
	std	r0, THREAD_CKVRSAVE(r3)

	/* Floating Point (FP) */
	addi	r7, r3, THREAD_CKFPSTATE
	SAVE_32FPRS_VSRS(0, R6, R7)	/* r6 scratch, r7 transact fp state */
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r7)


	/* TM regs, incl TEXASR -- these live in thread_struct.  Note they've
	 * been updated by the treclaim, to explain to userland the failure
	 * cause (aborted).
	 */
	mfspr	r0, SPRN_TEXASR
	mfspr	r3, SPRN_TFHAR
	mfspr	r4, SPRN_TFIAR
	std	r0, THREAD_TM_TEXASR(r12)
	std	r3, THREAD_TM_TFHAR(r12)
	std	r4, THREAD_TM_TFIAR(r12)

	/* AMR is checkpointed too, but is unsupported by Linux. */

	/* Restore original MSR/IRQ state & clear TM mode */
	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */

	li	r15, 0
	rldimi	r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1
	mtmsrd	r14

	REST_NVGPRS(r1)

	addi	r1, r1, TM_FRAME_SIZE
	lwz	r4, 8(r1)
	ld	r0, 16(r1)
	mtcr	r4
	mtlr	r0
	ld	r2, STK_GOT(r1)

	/* Load CPU's default DSCR */
	ld	r0, PACA_DSCR_DEFAULT(r13)
	mtspr	SPRN_DSCR, r0

	blr


	/* void __tm_recheckpoint(struct thread_struct *thread,
	 *			  unsigned long orig_msr)
	 *	- Restore the checkpointed register state saved by tm_reclaim
	 *	  when we switch_to a process.
	 *
	 * Call with IRQs off, stacks get all out of sync for
	 * some periods in here!
	 */
_GLOBAL(__tm_recheckpoint)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
	 * This is used for backing up the NVGPRs:
	 */
	SAVE_NVGPRS(r1)

	/* Load complete register state from ts_ckpt* registers */

	addi	r7, r3, PT_CKPT_REGS		/* Thread's ckpt_regs */

	/* Make r7 look like an exception frame so that we
	 * can use the neat GPRx(n) macros.  r7 is now NOT a pt_regs ptr!
	 */
	subi	r7, r7, STACK_FRAME_OVERHEAD

	/* We need to setup MSR for FP/VMX/VSX register save instructions. */
	mfmsr	r6
	mr	r5, r6
	ori	r5, r5, MSR_FP
#ifdef CONFIG_ALTIVEC
	oris	r5, r5, MSR_VEC@h
#endif
#ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
	oris	r5,r5, MSR_VSX@h
	END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r5

#ifdef CONFIG_ALTIVEC
	/*
	 * FP and VEC registers: These are recheckpointed from
	 * thread.ckfp_state and thread.ckvr_state respectively. The
	 * thread.fp_state[] version holds the 'live' (transactional)
	 * and will be loaded subsequently by any FPUnavailable trap.
	 */
	addi	r8, r3, THREAD_CKVRSTATE
	li	r5, VRSTATE_VSCR
	lvx	v0, r8, r5
	mtvscr	v0
	REST_32VRS(0, r5, r8)			/* r5 scratch, r8 ptr */
	ld	r5, THREAD_CKVRSAVE(r3)
	mtspr	SPRN_VRSAVE, r5
#endif

	addi	r8, r3, THREAD_CKFPSTATE
	lfd	fr0, FPSTATE_FPSCR(r8)
	MTFSF_L(fr0)
	REST_32FPRS_VSRS(0, R4, R8)

	mtmsr	r6				/* FP/Vec off again! */

restore_gprs:

	/* ***************** CTR, LR, XER ************** */
	ld	r4, _CTR(r7)
	ld	r5, _LINK(r7)
	ld	r8, _XER(r7)

	mtctr	r4
	mtlr	r5
	mtxer	r8

	/* ******************** TAR ******************** */
	ld	r4, THREAD_TM_TAR(r3)
	mtspr	SPRN_TAR,	r4

	/* Load up the PPR and DSCR in GPRs only at this stage */
	ld	r5, THREAD_TM_DSCR(r3)
	ld	r6, THREAD_TM_PPR(r3)

	REST_GPR(0, r7)				/* GPR0 */
	REST_2GPRS(2, r7)			/* GPR2-3 */
	REST_GPR(4, r7)				/* GPR4 */
	REST_4GPRS(8, r7)			/* GPR8-11 */
	REST_2GPRS(12, r7)			/* GPR12-13 */

	REST_NVGPRS(r7)				/* GPR14-31 */

	/* Load up PPR and DSCR here so we don't run with user values for long
	 */
	mtspr	SPRN_DSCR, r5
	mtspr	SPRN_PPR, r6

	/* Do final sanity check on TEXASR to make sure FS is set.  Do this
	 * here before we load up the userspace r1 so any bugs we hit will get
	 * a call chain */
	mfspr	r5, SPRN_TEXASR
	srdi	r5, r5, 16
	li	r6, (TEXASR_FS)@h
	and	r6, r6, r5
1:	tdeqi	r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/* Do final sanity check on MSR to make sure we are not transactional
	 * or suspended
	 */
	mfmsr	r6
	li	r5, (MSR_TS_MASK)@higher
	srdi	r6, r6, 32
	and	r6, r6, r5
1:	tdnei	r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/* Restore CR */
	ld	r6, _CCR(r7)
	mtcr	r6

	REST_GPR(6, r7)

	/*
	 * Store r1 and r5 on the stack so that we can access them
	 * after we clear MSR RI.
	 */

	REST_GPR(5, r7)
	std	r5, -8(r1)
	ld	r5, GPR1(r7)
	std	r5, -16(r1)

	REST_GPR(7, r7)

	/* Clear MSR RI since we are about to change r1. EE is already off */
	li	r5, 0
	mtmsrd	r5, 1

	/*
	 * BE CAREFUL HERE:
	 * At this point we can't take an SLB miss since we have MSR_RI
	 * off. Load only to/from the stack/paca which are in SLB bolted regions
	 * until we turn MSR RI back on.
	 */

	SET_SCRATCH0(r1)
	ld	r5, -8(r1)
	ld	r1, -16(r1)

	/* Commit register state as checkpointed state: */
	TRECHKPT

	HMT_MEDIUM

	/* Our transactional state has now changed.
	 *
	 * Now just get out of here.  Transactional (current) state will be
	 * updated once restore is called on the return path in the _switch-ed
	 * -to process.
	 */

	GET_PACA(r13)
	GET_SCRATCH0(r1)

	/* R1 is restored, so we are recoverable again.  EE is still off */
	li	r4, MSR_RI
	mtmsrd	r4, 1

	REST_NVGPRS(r1)

	addi	r1, r1, TM_FRAME_SIZE
	lwz	r4, 8(r1)
	ld	r0, 16(r1)
	mtcr	r4
	mtlr	r0
	ld	r2, STK_GOT(r1)

	/* Load CPU's default DSCR */
	ld	r0, PACA_DSCR_DEFAULT(r13)
	mtspr	SPRN_DSCR, r0

	blr

	/* ****************************************************************** */