1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * linux/arch/x86_64/entry.S 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs 7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> 8 * 9 * entry.S contains the system-call and fault low-level handling routines. 10 * 11 * Some of this is documented in Documentation/x86/entry_64.txt 12 * 13 * A note on terminology: 14 * - iret frame: Architecture defined interrupt frame from SS to RIP 15 * at the top of the kernel process stack. 16 * 17 * Some macro usage: 18 * - ENTRY/END: Define functions in the symbol table. 19 * - TRACE_IRQ_*: Trace hardirq state for lock debugging. 20 * - idtentry: Define exception entry points. 21 */ 22#include <linux/linkage.h> 23#include <asm/segment.h> 24#include <asm/cache.h> 25#include <asm/errno.h> 26#include <asm/asm-offsets.h> 27#include <asm/msr.h> 28#include <asm/unistd.h> 29#include <asm/thread_info.h> 30#include <asm/hw_irq.h> 31#include <asm/page_types.h> 32#include <asm/irqflags.h> 33#include <asm/paravirt.h> 34#include <asm/percpu.h> 35#include <asm/asm.h> 36#include <asm/smap.h> 37#include <asm/pgtable_types.h> 38#include <asm/export.h> 39#include <asm/frame.h> 40#include <asm/nospec-branch.h> 41#include <asm/fsgsbase.h> 42#include <linux/err.h> 43 44#include "calling.h" 45 46.code64 47.section .entry.text, "ax" 48 49#ifdef CONFIG_PARAVIRT 50ENTRY(native_usergs_sysret64) 51 UNWIND_HINT_EMPTY 52 swapgs 53 sysretq 54END(native_usergs_sysret64) 55#endif /* CONFIG_PARAVIRT */ 56 57.macro TRACE_IRQS_FLAGS flags:req 58#ifdef CONFIG_TRACE_IRQFLAGS 59 btl $9, \flags /* interrupts off? */ 60 jnc 1f 61 TRACE_IRQS_ON 621: 63#endif 64.endm 65 66.macro TRACE_IRQS_IRETQ 67 TRACE_IRQS_FLAGS EFLAGS(%rsp) 68.endm 69 70/* 71 * When dynamic function tracer is enabled it will add a breakpoint 72 * to all locations that it is about to modify, sync CPUs, update 73 * all the code, sync CPUs, then remove the breakpoints. In this time 74 * if lockdep is enabled, it might jump back into the debug handler 75 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF). 76 * 77 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to 78 * make sure the stack pointer does not get reset back to the top 79 * of the debug stack, and instead just reuses the current stack. 80 */ 81#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) 82 83.macro TRACE_IRQS_OFF_DEBUG 84 call debug_stack_set_zero 85 TRACE_IRQS_OFF 86 call debug_stack_reset 87.endm 88 89.macro TRACE_IRQS_ON_DEBUG 90 call debug_stack_set_zero 91 TRACE_IRQS_ON 92 call debug_stack_reset 93.endm 94 95.macro TRACE_IRQS_IRETQ_DEBUG 96 btl $9, EFLAGS(%rsp) /* interrupts off? */ 97 jnc 1f 98 TRACE_IRQS_ON_DEBUG 991: 100.endm 101 102#else 103# define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF 104# define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON 105# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ 106#endif 107 108/* 109 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers. 110 * 111 * This is the only entry point used for 64-bit system calls. The 112 * hardware interface is reasonably well designed and the register to 113 * argument mapping Linux uses fits well with the registers that are 114 * available when SYSCALL is used. 115 * 116 * SYSCALL instructions can be found inlined in libc implementations as 117 * well as some other programs and libraries. There are also a handful 118 * of SYSCALL instructions in the vDSO used, for example, as a 119 * clock_gettimeofday fallback. 120 * 121 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, 122 * then loads new ss, cs, and rip from previously programmed MSRs. 123 * rflags gets masked by a value from another MSR (so CLD and CLAC 124 * are not needed). SYSCALL does not save anything on the stack 125 * and does not change rsp. 126 * 127 * Registers on entry: 128 * rax system call number 129 * rcx return address 130 * r11 saved rflags (note: r11 is callee-clobbered register in C ABI) 131 * rdi arg0 132 * rsi arg1 133 * rdx arg2 134 * r10 arg3 (needs to be moved to rcx to conform to C ABI) 135 * r8 arg4 136 * r9 arg5 137 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI) 138 * 139 * Only called from user space. 140 * 141 * When user can change pt_regs->foo always force IRET. That is because 142 * it deals with uncanonical addresses better. SYSRET has trouble 143 * with them due to bugs in both AMD and Intel CPUs. 144 */ 145 146ENTRY(entry_SYSCALL_64) 147 UNWIND_HINT_EMPTY 148 /* 149 * Interrupts are off on entry. 150 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, 151 * it is too small to ever cause noticeable irq latency. 152 */ 153 154 swapgs 155 /* tss.sp2 is scratch space. */ 156 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2) 157 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp 158 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 159 160 /* Construct struct pt_regs on stack */ 161 pushq $__USER_DS /* pt_regs->ss */ 162 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */ 163 pushq %r11 /* pt_regs->flags */ 164 pushq $__USER_CS /* pt_regs->cs */ 165 pushq %rcx /* pt_regs->ip */ 166GLOBAL(entry_SYSCALL_64_after_hwframe) 167 pushq %rax /* pt_regs->orig_ax */ 168 169 PUSH_AND_CLEAR_REGS rax=$-ENOSYS 170 171 TRACE_IRQS_OFF 172 173 /* IRQs are off. */ 174 movq %rax, %rdi 175 movq %rsp, %rsi 176 call do_syscall_64 /* returns with IRQs disabled */ 177 178 TRACE_IRQS_IRETQ /* we're about to change IF */ 179 180 /* 181 * Try to use SYSRET instead of IRET if we're returning to 182 * a completely clean 64-bit userspace context. If we're not, 183 * go to the slow exit path. 184 */ 185 movq RCX(%rsp), %rcx 186 movq RIP(%rsp), %r11 187 188 cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ 189 jne swapgs_restore_regs_and_return_to_usermode 190 191 /* 192 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP 193 * in kernel space. This essentially lets the user take over 194 * the kernel, since userspace controls RSP. 195 * 196 * If width of "canonical tail" ever becomes variable, this will need 197 * to be updated to remain correct on both old and new CPUs. 198 * 199 * Change top bits to match most significant bit (47th or 56th bit 200 * depending on paging mode) in the address. 201 */ 202#ifdef CONFIG_X86_5LEVEL 203 ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \ 204 "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57 205#else 206 shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx 207 sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx 208#endif 209 210 /* If this changed %rcx, it was not canonical */ 211 cmpq %rcx, %r11 212 jne swapgs_restore_regs_and_return_to_usermode 213 214 cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ 215 jne swapgs_restore_regs_and_return_to_usermode 216 217 movq R11(%rsp), %r11 218 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ 219 jne swapgs_restore_regs_and_return_to_usermode 220 221 /* 222 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot 223 * restore RF properly. If the slowpath sets it for whatever reason, we 224 * need to restore it correctly. 225 * 226 * SYSRET can restore TF, but unlike IRET, restoring TF results in a 227 * trap from userspace immediately after SYSRET. This would cause an 228 * infinite loop whenever #DB happens with register state that satisfies 229 * the opportunistic SYSRET conditions. For example, single-stepping 230 * this user code: 231 * 232 * movq $stuck_here, %rcx 233 * pushfq 234 * popq %r11 235 * stuck_here: 236 * 237 * would never get past 'stuck_here'. 238 */ 239 testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 240 jnz swapgs_restore_regs_and_return_to_usermode 241 242 /* nothing to check for RSP */ 243 244 cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ 245 jne swapgs_restore_regs_and_return_to_usermode 246 247 /* 248 * We win! This label is here just for ease of understanding 249 * perf profiles. Nothing jumps here. 250 */ 251syscall_return_via_sysret: 252 /* rcx and r11 are already restored (see code above) */ 253 UNWIND_HINT_EMPTY 254 POP_REGS pop_rdi=0 skip_r11rcx=1 255 256 /* 257 * Now all regs are restored except RSP and RDI. 258 * Save old stack pointer and switch to trampoline stack. 259 */ 260 movq %rsp, %rdi 261 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 262 263 pushq RSP-RDI(%rdi) /* RSP */ 264 pushq (%rdi) /* RDI */ 265 266 /* 267 * We are on the trampoline stack. All regs except RDI are live. 268 * We can do future final exit work right here. 269 */ 270 STACKLEAK_ERASE_NOCLOBBER 271 272 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 273 274 popq %rdi 275 popq %rsp 276 USERGS_SYSRET64 277END(entry_SYSCALL_64) 278 279/* 280 * %rdi: prev task 281 * %rsi: next task 282 */ 283ENTRY(__switch_to_asm) 284 UNWIND_HINT_FUNC 285 /* 286 * Save callee-saved registers 287 * This must match the order in inactive_task_frame 288 */ 289 pushq %rbp 290 pushq %rbx 291 pushq %r12 292 pushq %r13 293 pushq %r14 294 pushq %r15 295 296 /* switch stack */ 297 movq %rsp, TASK_threadsp(%rdi) 298 movq TASK_threadsp(%rsi), %rsp 299 300#ifdef CONFIG_STACKPROTECTOR 301 movq TASK_stack_canary(%rsi), %rbx 302 movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset 303#endif 304 305#ifdef CONFIG_RETPOLINE 306 /* 307 * When switching from a shallower to a deeper call stack 308 * the RSB may either underflow or use entries populated 309 * with userspace addresses. On CPUs where those concerns 310 * exist, overwrite the RSB with entries which capture 311 * speculative execution to prevent attack. 312 */ 313 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW 314#endif 315 316 /* restore callee-saved registers */ 317 popq %r15 318 popq %r14 319 popq %r13 320 popq %r12 321 popq %rbx 322 popq %rbp 323 324 jmp __switch_to 325END(__switch_to_asm) 326 327/* 328 * A newly forked process directly context switches into this address. 329 * 330 * rax: prev task we switched from 331 * rbx: kernel thread func (NULL for user thread) 332 * r12: kernel thread arg 333 */ 334ENTRY(ret_from_fork) 335 UNWIND_HINT_EMPTY 336 movq %rax, %rdi 337 call schedule_tail /* rdi: 'prev' task parameter */ 338 339 testq %rbx, %rbx /* from kernel_thread? */ 340 jnz 1f /* kernel threads are uncommon */ 341 3422: 343 UNWIND_HINT_REGS 344 movq %rsp, %rdi 345 call syscall_return_slowpath /* returns with IRQs disabled */ 346 TRACE_IRQS_ON /* user mode is traced as IRQS on */ 347 jmp swapgs_restore_regs_and_return_to_usermode 348 3491: 350 /* kernel thread */ 351 UNWIND_HINT_EMPTY 352 movq %r12, %rdi 353 CALL_NOSPEC %rbx 354 /* 355 * A kernel thread is allowed to return here after successfully 356 * calling do_execve(). Exit to userspace to complete the execve() 357 * syscall. 358 */ 359 movq $0, RAX(%rsp) 360 jmp 2b 361END(ret_from_fork) 362 363/* 364 * Build the entry stubs with some assembler magic. 365 * We pack 1 stub into every 8-byte block. 366 */ 367 .align 8 368ENTRY(irq_entries_start) 369 vector=FIRST_EXTERNAL_VECTOR 370 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) 371 UNWIND_HINT_IRET_REGS 372 pushq $(~vector+0x80) /* Note: always in signed byte range */ 373 jmp common_interrupt 374 .align 8 375 vector=vector+1 376 .endr 377END(irq_entries_start) 378 379.macro DEBUG_ENTRY_ASSERT_IRQS_OFF 380#ifdef CONFIG_DEBUG_ENTRY 381 pushq %rax 382 SAVE_FLAGS(CLBR_RAX) 383 testl $X86_EFLAGS_IF, %eax 384 jz .Lokay_\@ 385 ud2 386.Lokay_\@: 387 popq %rax 388#endif 389.endm 390 391/* 392 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers 393 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone. 394 * Requires kernel GSBASE. 395 * 396 * The invariant is that, if irq_count != -1, then the IRQ stack is in use. 397 */ 398.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0 399 DEBUG_ENTRY_ASSERT_IRQS_OFF 400 401 .if \save_ret 402 /* 403 * If save_ret is set, the original stack contains one additional 404 * entry -- the return address. Therefore, move the address one 405 * entry below %rsp to \old_rsp. 406 */ 407 leaq 8(%rsp), \old_rsp 408 .else 409 movq %rsp, \old_rsp 410 .endif 411 412 .if \regs 413 UNWIND_HINT_REGS base=\old_rsp 414 .endif 415 416 incl PER_CPU_VAR(irq_count) 417 jnz .Lirq_stack_push_old_rsp_\@ 418 419 /* 420 * Right now, if we just incremented irq_count to zero, we've 421 * claimed the IRQ stack but we haven't switched to it yet. 422 * 423 * If anything is added that can interrupt us here without using IST, 424 * it must be *extremely* careful to limit its stack usage. This 425 * could include kprobes and a hypothetical future IST-less #DB 426 * handler. 427 * 428 * The OOPS unwinder relies on the word at the top of the IRQ 429 * stack linking back to the previous RSP for the entire time we're 430 * on the IRQ stack. For this to work reliably, we need to write 431 * it before we actually move ourselves to the IRQ stack. 432 */ 433 434 movq \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8) 435 movq PER_CPU_VAR(hardirq_stack_ptr), %rsp 436 437#ifdef CONFIG_DEBUG_ENTRY 438 /* 439 * If the first movq above becomes wrong due to IRQ stack layout 440 * changes, the only way we'll notice is if we try to unwind right 441 * here. Assert that we set up the stack right to catch this type 442 * of bug quickly. 443 */ 444 cmpq -8(%rsp), \old_rsp 445 je .Lirq_stack_okay\@ 446 ud2 447 .Lirq_stack_okay\@: 448#endif 449 450.Lirq_stack_push_old_rsp_\@: 451 pushq \old_rsp 452 453 .if \regs 454 UNWIND_HINT_REGS indirect=1 455 .endif 456 457 .if \save_ret 458 /* 459 * Push the return address to the stack. This return address can 460 * be found at the "real" original RSP, which was offset by 8 at 461 * the beginning of this macro. 462 */ 463 pushq -8(\old_rsp) 464 .endif 465.endm 466 467/* 468 * Undoes ENTER_IRQ_STACK. 469 */ 470.macro LEAVE_IRQ_STACK regs=1 471 DEBUG_ENTRY_ASSERT_IRQS_OFF 472 /* We need to be off the IRQ stack before decrementing irq_count. */ 473 popq %rsp 474 475 .if \regs 476 UNWIND_HINT_REGS 477 .endif 478 479 /* 480 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming 481 * the irq stack but we're not on it. 482 */ 483 484 decl PER_CPU_VAR(irq_count) 485.endm 486 487/* 488 * Interrupt entry helper function. 489 * 490 * Entry runs with interrupts off. Stack layout at entry: 491 * +----------------------------------------------------+ 492 * | regs->ss | 493 * | regs->rsp | 494 * | regs->eflags | 495 * | regs->cs | 496 * | regs->ip | 497 * +----------------------------------------------------+ 498 * | regs->orig_ax = ~(interrupt number) | 499 * +----------------------------------------------------+ 500 * | return address | 501 * +----------------------------------------------------+ 502 */ 503ENTRY(interrupt_entry) 504 UNWIND_HINT_FUNC 505 ASM_CLAC 506 cld 507 508 testb $3, CS-ORIG_RAX+8(%rsp) 509 jz 1f 510 SWAPGS 511 512 /* 513 * Switch to the thread stack. The IRET frame and orig_ax are 514 * on the stack, as well as the return address. RDI..R12 are 515 * not (yet) on the stack and space has not (yet) been 516 * allocated for them. 517 */ 518 pushq %rdi 519 520 /* Need to switch before accessing the thread stack. */ 521 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi 522 movq %rsp, %rdi 523 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 524 525 /* 526 * We have RDI, return address, and orig_ax on the stack on 527 * top of the IRET frame. That means offset=24 528 */ 529 UNWIND_HINT_IRET_REGS base=%rdi offset=24 530 531 pushq 7*8(%rdi) /* regs->ss */ 532 pushq 6*8(%rdi) /* regs->rsp */ 533 pushq 5*8(%rdi) /* regs->eflags */ 534 pushq 4*8(%rdi) /* regs->cs */ 535 pushq 3*8(%rdi) /* regs->ip */ 536 pushq 2*8(%rdi) /* regs->orig_ax */ 537 pushq 8(%rdi) /* return address */ 538 UNWIND_HINT_FUNC 539 540 movq (%rdi), %rdi 5411: 542 543 PUSH_AND_CLEAR_REGS save_ret=1 544 ENCODE_FRAME_POINTER 8 545 546 testb $3, CS+8(%rsp) 547 jz 1f 548 549 /* 550 * IRQ from user mode. 551 * 552 * We need to tell lockdep that IRQs are off. We can't do this until 553 * we fix gsbase, and we should do it before enter_from_user_mode 554 * (which can take locks). Since TRACE_IRQS_OFF is idempotent, 555 * the simplest way to handle it is to just call it twice if 556 * we enter from user mode. There's no reason to optimize this since 557 * TRACE_IRQS_OFF is a no-op if lockdep is off. 558 */ 559 TRACE_IRQS_OFF 560 561 CALL_enter_from_user_mode 562 5631: 564 ENTER_IRQ_STACK old_rsp=%rdi save_ret=1 565 /* We entered an interrupt context - irqs are off: */ 566 TRACE_IRQS_OFF 567 568 ret 569END(interrupt_entry) 570_ASM_NOKPROBE(interrupt_entry) 571 572 573/* Interrupt entry/exit. */ 574 575 /* 576 * The interrupt stubs push (~vector+0x80) onto the stack and 577 * then jump to common_interrupt. 578 */ 579 .p2align CONFIG_X86_L1_CACHE_SHIFT 580common_interrupt: 581 addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ 582 call interrupt_entry 583 UNWIND_HINT_REGS indirect=1 584 call do_IRQ /* rdi points to pt_regs */ 585 /* 0(%rsp): old RSP */ 586ret_from_intr: 587 DISABLE_INTERRUPTS(CLBR_ANY) 588 TRACE_IRQS_OFF 589 590 LEAVE_IRQ_STACK 591 592 testb $3, CS(%rsp) 593 jz retint_kernel 594 595 /* Interrupt came from user space */ 596GLOBAL(retint_user) 597 mov %rsp,%rdi 598 call prepare_exit_to_usermode 599 TRACE_IRQS_IRETQ 600 601GLOBAL(swapgs_restore_regs_and_return_to_usermode) 602#ifdef CONFIG_DEBUG_ENTRY 603 /* Assert that pt_regs indicates user mode. */ 604 testb $3, CS(%rsp) 605 jnz 1f 606 ud2 6071: 608#endif 609 POP_REGS pop_rdi=0 610 611 /* 612 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. 613 * Save old stack pointer and switch to trampoline stack. 614 */ 615 movq %rsp, %rdi 616 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 617 618 /* Copy the IRET frame to the trampoline stack. */ 619 pushq 6*8(%rdi) /* SS */ 620 pushq 5*8(%rdi) /* RSP */ 621 pushq 4*8(%rdi) /* EFLAGS */ 622 pushq 3*8(%rdi) /* CS */ 623 pushq 2*8(%rdi) /* RIP */ 624 625 /* Push user RDI on the trampoline stack. */ 626 pushq (%rdi) 627 628 /* 629 * We are on the trampoline stack. All regs except RDI are live. 630 * We can do future final exit work right here. 631 */ 632 STACKLEAK_ERASE_NOCLOBBER 633 634 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 635 636 /* Restore RDI. */ 637 popq %rdi 638 SWAPGS 639 INTERRUPT_RETURN 640 641 642/* Returning to kernel space */ 643retint_kernel: 644#ifdef CONFIG_PREEMPT 645 /* Interrupts are off */ 646 /* Check if we need preemption */ 647 btl $9, EFLAGS(%rsp) /* were interrupts off? */ 648 jnc 1f 649 cmpl $0, PER_CPU_VAR(__preempt_count) 650 jnz 1f 651 call preempt_schedule_irq 6521: 653#endif 654 /* 655 * The iretq could re-enable interrupts: 656 */ 657 TRACE_IRQS_IRETQ 658 659GLOBAL(restore_regs_and_return_to_kernel) 660#ifdef CONFIG_DEBUG_ENTRY 661 /* Assert that pt_regs indicates kernel mode. */ 662 testb $3, CS(%rsp) 663 jz 1f 664 ud2 6651: 666#endif 667 POP_REGS 668 addq $8, %rsp /* skip regs->orig_ax */ 669 /* 670 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization 671 * when returning from IPI handler. 672 */ 673 INTERRUPT_RETURN 674 675ENTRY(native_iret) 676 UNWIND_HINT_IRET_REGS 677 /* 678 * Are we returning to a stack segment from the LDT? Note: in 679 * 64-bit mode SS:RSP on the exception stack is always valid. 680 */ 681#ifdef CONFIG_X86_ESPFIX64 682 testb $4, (SS-RIP)(%rsp) 683 jnz native_irq_return_ldt 684#endif 685 686.global native_irq_return_iret 687native_irq_return_iret: 688 /* 689 * This may fault. Non-paranoid faults on return to userspace are 690 * handled by fixup_bad_iret. These include #SS, #GP, and #NP. 691 * Double-faults due to espfix64 are handled in do_double_fault. 692 * Other faults here are fatal. 693 */ 694 iretq 695 696#ifdef CONFIG_X86_ESPFIX64 697native_irq_return_ldt: 698 /* 699 * We are running with user GSBASE. All GPRs contain their user 700 * values. We have a percpu ESPFIX stack that is eight slots 701 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom 702 * of the ESPFIX stack. 703 * 704 * We clobber RAX and RDI in this code. We stash RDI on the 705 * normal stack and RAX on the ESPFIX stack. 706 * 707 * The ESPFIX stack layout we set up looks like this: 708 * 709 * --- top of ESPFIX stack --- 710 * SS 711 * RSP 712 * RFLAGS 713 * CS 714 * RIP <-- RSP points here when we're done 715 * RAX <-- espfix_waddr points here 716 * --- bottom of ESPFIX stack --- 717 */ 718 719 pushq %rdi /* Stash user RDI */ 720 SWAPGS /* to kernel GS */ 721 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ 722 723 movq PER_CPU_VAR(espfix_waddr), %rdi 724 movq %rax, (0*8)(%rdi) /* user RAX */ 725 movq (1*8)(%rsp), %rax /* user RIP */ 726 movq %rax, (1*8)(%rdi) 727 movq (2*8)(%rsp), %rax /* user CS */ 728 movq %rax, (2*8)(%rdi) 729 movq (3*8)(%rsp), %rax /* user RFLAGS */ 730 movq %rax, (3*8)(%rdi) 731 movq (5*8)(%rsp), %rax /* user SS */ 732 movq %rax, (5*8)(%rdi) 733 movq (4*8)(%rsp), %rax /* user RSP */ 734 movq %rax, (4*8)(%rdi) 735 /* Now RAX == RSP. */ 736 737 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ 738 739 /* 740 * espfix_stack[31:16] == 0. The page tables are set up such that 741 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of 742 * espfix_waddr for any X. That is, there are 65536 RO aliases of 743 * the same page. Set up RSP so that RSP[31:16] contains the 744 * respective 16 bits of the /userspace/ RSP and RSP nonetheless 745 * still points to an RO alias of the ESPFIX stack. 746 */ 747 orq PER_CPU_VAR(espfix_stack), %rax 748 749 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi 750 SWAPGS /* to user GS */ 751 popq %rdi /* Restore user RDI */ 752 753 movq %rax, %rsp 754 UNWIND_HINT_IRET_REGS offset=8 755 756 /* 757 * At this point, we cannot write to the stack any more, but we can 758 * still read. 759 */ 760 popq %rax /* Restore user RAX */ 761 762 /* 763 * RSP now points to an ordinary IRET frame, except that the page 764 * is read-only and RSP[31:16] are preloaded with the userspace 765 * values. We can now IRET back to userspace. 766 */ 767 jmp native_irq_return_iret 768#endif 769END(common_interrupt) 770_ASM_NOKPROBE(common_interrupt) 771 772/* 773 * APIC interrupts. 774 */ 775.macro apicinterrupt3 num sym do_sym 776ENTRY(\sym) 777 UNWIND_HINT_IRET_REGS 778 pushq $~(\num) 779.Lcommon_\sym: 780 call interrupt_entry 781 UNWIND_HINT_REGS indirect=1 782 call \do_sym /* rdi points to pt_regs */ 783 jmp ret_from_intr 784END(\sym) 785_ASM_NOKPROBE(\sym) 786.endm 787 788/* Make sure APIC interrupt handlers end up in the irqentry section: */ 789#define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax" 790#define POP_SECTION_IRQENTRY .popsection 791 792.macro apicinterrupt num sym do_sym 793PUSH_SECTION_IRQENTRY 794apicinterrupt3 \num \sym \do_sym 795POP_SECTION_IRQENTRY 796.endm 797 798#ifdef CONFIG_SMP 799apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt 800apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt 801#endif 802 803#ifdef CONFIG_X86_UV 804apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt 805#endif 806 807apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt 808apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi 809 810#ifdef CONFIG_HAVE_KVM 811apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi 812apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi 813apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi 814#endif 815 816#ifdef CONFIG_X86_MCE_THRESHOLD 817apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt 818#endif 819 820#ifdef CONFIG_X86_MCE_AMD 821apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt 822#endif 823 824#ifdef CONFIG_X86_THERMAL_VECTOR 825apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt 826#endif 827 828#ifdef CONFIG_SMP 829apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt 830apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt 831apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt 832#endif 833 834apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt 835apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt 836 837#ifdef CONFIG_IRQ_WORK 838apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt 839#endif 840 841/* 842 * Exception entry points. 843 */ 844#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8) 845 846/** 847 * idtentry - Generate an IDT entry stub 848 * @sym: Name of the generated entry point 849 * @do_sym: C function to be called 850 * @has_error_code: True if this IDT vector has an error code on the stack 851 * @paranoid: non-zero means that this vector may be invoked from 852 * kernel mode with user GSBASE and/or user CR3. 853 * 2 is special -- see below. 854 * @shift_ist: Set to an IST index if entries from kernel mode should 855 * decrement the IST stack so that nested entries get a 856 * fresh stack. (This is for #DB, which has a nasty habit 857 * of recursing.) 858 * 859 * idtentry generates an IDT stub that sets up a usable kernel context, 860 * creates struct pt_regs, and calls @do_sym. The stub has the following 861 * special behaviors: 862 * 863 * On an entry from user mode, the stub switches from the trampoline or 864 * IST stack to the normal thread stack. On an exit to user mode, the 865 * normal exit-to-usermode path is invoked. 866 * 867 * On an exit to kernel mode, if @paranoid == 0, we check for preemption, 868 * whereas we omit the preemption check if @paranoid != 0. This is purely 869 * because the implementation is simpler this way. The kernel only needs 870 * to check for asynchronous kernel preemption when IRQ handlers return. 871 * 872 * If @paranoid == 0, then the stub will handle IRET faults by pretending 873 * that the fault came from user mode. It will handle gs_change faults by 874 * pretending that the fault happened with kernel GSBASE. Since this handling 875 * is omitted for @paranoid != 0, the #GP, #SS, and #NP stubs must have 876 * @paranoid == 0. This special handling will do the wrong thing for 877 * espfix-induced #DF on IRET, so #DF must not use @paranoid == 0. 878 * 879 * @paranoid == 2 is special: the stub will never switch stacks. This is for 880 * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS. 881 */ 882.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 883ENTRY(\sym) 884 UNWIND_HINT_IRET_REGS offset=\has_error_code*8 885 886 /* Sanity check */ 887 .if \shift_ist != -1 && \paranoid == 0 888 .error "using shift_ist requires paranoid=1" 889 .endif 890 891 ASM_CLAC 892 893 .if \has_error_code == 0 894 pushq $-1 /* ORIG_RAX: no syscall to restart */ 895 .endif 896 897 .if \paranoid == 1 898 testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ 899 jnz .Lfrom_usermode_switch_stack_\@ 900 .endif 901 902 .if \create_gap == 1 903 /* 904 * If coming from kernel space, create a 6-word gap to allow the 905 * int3 handler to emulate a call instruction. 906 */ 907 testb $3, CS-ORIG_RAX(%rsp) 908 jnz .Lfrom_usermode_no_gap_\@ 909 .rept 6 910 pushq 5*8(%rsp) 911 .endr 912 UNWIND_HINT_IRET_REGS offset=8 913.Lfrom_usermode_no_gap_\@: 914 .endif 915 916 .if \paranoid 917 call paranoid_entry 918 .else 919 call error_entry 920 .endif 921 UNWIND_HINT_REGS 922 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ 923 924 .if \paranoid 925 .if \shift_ist != -1 926 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ 927 .else 928 TRACE_IRQS_OFF 929 .endif 930 .endif 931 932 movq %rsp, %rdi /* pt_regs pointer */ 933 934 .if \has_error_code 935 movq ORIG_RAX(%rsp), %rsi /* get error code */ 936 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 937 .else 938 xorl %esi, %esi /* no error code */ 939 .endif 940 941 .if \shift_ist != -1 942 subq $\ist_offset, CPU_TSS_IST(\shift_ist) 943 .endif 944 945 call \do_sym 946 947 .if \shift_ist != -1 948 addq $\ist_offset, CPU_TSS_IST(\shift_ist) 949 .endif 950 951 .if \paranoid 952 jmp paranoid_exit 953 .else 954 jmp error_exit 955 .endif 956 957 .if \paranoid == 1 958 /* 959 * Entry from userspace. Switch stacks and treat it 960 * as a normal entry. This means that paranoid handlers 961 * run in real process context if user_mode(regs). 962 */ 963.Lfrom_usermode_switch_stack_\@: 964 call error_entry 965 966 movq %rsp, %rdi /* pt_regs pointer */ 967 968 .if \has_error_code 969 movq ORIG_RAX(%rsp), %rsi /* get error code */ 970 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ 971 .else 972 xorl %esi, %esi /* no error code */ 973 .endif 974 975 call \do_sym 976 977 jmp error_exit 978 .endif 979_ASM_NOKPROBE(\sym) 980END(\sym) 981.endm 982 983idtentry divide_error do_divide_error has_error_code=0 984idtentry overflow do_overflow has_error_code=0 985idtentry bounds do_bounds has_error_code=0 986idtentry invalid_op do_invalid_op has_error_code=0 987idtentry device_not_available do_device_not_available has_error_code=0 988idtentry double_fault do_double_fault has_error_code=1 paranoid=2 989idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 990idtentry invalid_TSS do_invalid_TSS has_error_code=1 991idtentry segment_not_present do_segment_not_present has_error_code=1 992idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0 993idtentry coprocessor_error do_coprocessor_error has_error_code=0 994idtentry alignment_check do_alignment_check has_error_code=1 995idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 996 997 998 /* 999 * Reload gs selector with exception handling 1000 * edi: new selector 1001 */ 1002ENTRY(native_load_gs_index) 1003 FRAME_BEGIN 1004 pushfq 1005 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) 1006 TRACE_IRQS_OFF 1007 SWAPGS 1008.Lgs_change: 1009 movl %edi, %gs 10102: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE 1011 SWAPGS 1012 TRACE_IRQS_FLAGS (%rsp) 1013 popfq 1014 FRAME_END 1015 ret 1016ENDPROC(native_load_gs_index) 1017EXPORT_SYMBOL(native_load_gs_index) 1018 1019 _ASM_EXTABLE(.Lgs_change, bad_gs) 1020 .section .fixup, "ax" 1021 /* running with kernelgs */ 1022bad_gs: 1023 SWAPGS /* switch back to user gs */ 1024.macro ZAP_GS 1025 /* This can't be a string because the preprocessor needs to see it. */ 1026 movl $__USER_DS, %eax 1027 movl %eax, %gs 1028.endm 1029 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG 1030 xorl %eax, %eax 1031 movl %eax, %gs 1032 jmp 2b 1033 .previous 1034 1035/* Call softirq on interrupt stack. Interrupts are off. */ 1036ENTRY(do_softirq_own_stack) 1037 pushq %rbp 1038 mov %rsp, %rbp 1039 ENTER_IRQ_STACK regs=0 old_rsp=%r11 1040 call __do_softirq 1041 LEAVE_IRQ_STACK regs=0 1042 leaveq 1043 ret 1044ENDPROC(do_softirq_own_stack) 1045 1046#ifdef CONFIG_XEN_PV 1047idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 1048 1049/* 1050 * A note on the "critical region" in our callback handler. 1051 * We want to avoid stacking callback handlers due to events occurring 1052 * during handling of the last event. To do this, we keep events disabled 1053 * until we've done all processing. HOWEVER, we must enable events before 1054 * popping the stack frame (can't be done atomically) and so it would still 1055 * be possible to get enough handler activations to overflow the stack. 1056 * Although unlikely, bugs of that kind are hard to track down, so we'd 1057 * like to avoid the possibility. 1058 * So, on entry to the handler we detect whether we interrupted an 1059 * existing activation in its critical region -- if so, we pop the current 1060 * activation and restart the handler using the previous one. 1061 */ 1062ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ 1063 1064/* 1065 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will 1066 * see the correct pointer to the pt_regs 1067 */ 1068 UNWIND_HINT_FUNC 1069 movq %rdi, %rsp /* we don't return, adjust the stack frame */ 1070 UNWIND_HINT_REGS 1071 1072 ENTER_IRQ_STACK old_rsp=%r10 1073 call xen_evtchn_do_upcall 1074 LEAVE_IRQ_STACK 1075 1076#ifndef CONFIG_PREEMPT 1077 call xen_maybe_preempt_hcall 1078#endif 1079 jmp error_exit 1080END(xen_do_hypervisor_callback) 1081 1082/* 1083 * Hypervisor uses this for application faults while it executes. 1084 * We get here for two reasons: 1085 * 1. Fault while reloading DS, ES, FS or GS 1086 * 2. Fault while executing IRET 1087 * Category 1 we do not need to fix up as Xen has already reloaded all segment 1088 * registers that could be reloaded and zeroed the others. 1089 * Category 2 we fix up by killing the current process. We cannot use the 1090 * normal Linux return path in this case because if we use the IRET hypercall 1091 * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 1092 * We distinguish between categories by comparing each saved segment register 1093 * with its current contents: any discrepancy means we in category 1. 1094 */ 1095ENTRY(xen_failsafe_callback) 1096 UNWIND_HINT_EMPTY 1097 movl %ds, %ecx 1098 cmpw %cx, 0x10(%rsp) 1099 jne 1f 1100 movl %es, %ecx 1101 cmpw %cx, 0x18(%rsp) 1102 jne 1f 1103 movl %fs, %ecx 1104 cmpw %cx, 0x20(%rsp) 1105 jne 1f 1106 movl %gs, %ecx 1107 cmpw %cx, 0x28(%rsp) 1108 jne 1f 1109 /* All segments match their saved values => Category 2 (Bad IRET). */ 1110 movq (%rsp), %rcx 1111 movq 8(%rsp), %r11 1112 addq $0x30, %rsp 1113 pushq $0 /* RIP */ 1114 UNWIND_HINT_IRET_REGS offset=8 1115 jmp general_protection 11161: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 1117 movq (%rsp), %rcx 1118 movq 8(%rsp), %r11 1119 addq $0x30, %rsp 1120 UNWIND_HINT_IRET_REGS 1121 pushq $-1 /* orig_ax = -1 => not a system call */ 1122 PUSH_AND_CLEAR_REGS 1123 ENCODE_FRAME_POINTER 1124 jmp error_exit 1125END(xen_failsafe_callback) 1126#endif /* CONFIG_XEN_PV */ 1127 1128#ifdef CONFIG_XEN_PVHVM 1129apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1130 xen_hvm_callback_vector xen_evtchn_do_upcall 1131#endif 1132 1133 1134#if IS_ENABLED(CONFIG_HYPERV) 1135apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1136 hyperv_callback_vector hyperv_vector_handler 1137 1138apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \ 1139 hyperv_reenlightenment_vector hyperv_reenlightenment_intr 1140 1141apicinterrupt3 HYPERV_STIMER0_VECTOR \ 1142 hv_stimer0_callback_vector hv_stimer0_vector_handler 1143#endif /* CONFIG_HYPERV */ 1144 1145idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET 1146idtentry int3 do_int3 has_error_code=0 create_gap=1 1147idtentry stack_segment do_stack_segment has_error_code=1 1148 1149#ifdef CONFIG_XEN_PV 1150idtentry xennmi do_nmi has_error_code=0 1151idtentry xendebug do_debug has_error_code=0 1152idtentry xenint3 do_int3 has_error_code=0 1153#endif 1154 1155idtentry general_protection do_general_protection has_error_code=1 1156idtentry page_fault do_page_fault has_error_code=1 1157 1158#ifdef CONFIG_KVM_GUEST 1159idtentry async_page_fault do_async_page_fault has_error_code=1 1160#endif 1161 1162#ifdef CONFIG_X86_MCE 1163idtentry machine_check do_mce has_error_code=0 paranoid=1 1164#endif 1165 1166/* 1167 * Save all registers in pt_regs. Return GSBASE related information 1168 * in EBX depending on the availability of the FSGSBASE instructions: 1169 * 1170 * FSGSBASE R/EBX 1171 * N 0 -> SWAPGS on exit 1172 * 1 -> no SWAPGS on exit 1173 * 1174 * Y GSBASE value at entry, must be restored in paranoid_exit 1175 */ 1176ENTRY(paranoid_entry) 1177 UNWIND_HINT_FUNC 1178 cld 1179 PUSH_AND_CLEAR_REGS save_ret=1 1180 ENCODE_FRAME_POINTER 8 1181 1182 /* 1183 * Always stash CR3 in %r14. This value will be restored, 1184 * verbatim, at exit. Needed if paranoid_entry interrupted 1185 * another entry that already switched to the user CR3 value 1186 * but has not yet returned to userspace. 1187 * 1188 * This is also why CS (stashed in the "iret frame" by the 1189 * hardware at entry) can not be used: this may be a return 1190 * to kernel code, but with a user CR3 value. 1191 * 1192 * Switching CR3 does not depend on kernel GSBASE so it can 1193 * be done before switching to the kernel GSBASE. This is 1194 * required for FSGSBASE because the kernel GSBASE has to 1195 * be retrieved from a kernel internal table. 1196 */ 1197 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 1198 1199 /* 1200 * Handling GSBASE depends on the availability of FSGSBASE. 1201 * 1202 * Without FSGSBASE the kernel enforces that negative GSBASE 1203 * values indicate kernel GSBASE. With FSGSBASE no assumptions 1204 * can be made about the GSBASE value when entering from user 1205 * space. 1206 */ 1207 ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE 1208 1209 /* 1210 * Read the current GSBASE and store it in in %rbx unconditionally, 1211 * retrieve and set the current CPUs kernel GSBASE. The stored value 1212 * has to be restored in paranoid_exit unconditionally. 1213 */ 1214 SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx 1215 ret 1216 1217.Lparanoid_entry_checkgs: 1218 /* EBX = 1 -> kernel GSBASE active, no restore required */ 1219 movl $1, %ebx 1220 /* 1221 * The kernel-enforced convention is a negative GSBASE indicates 1222 * a kernel value. No SWAPGS needed on entry and exit. 1223 */ 1224 movl $MSR_GS_BASE, %ecx 1225 rdmsr 1226 testl %edx, %edx 1227 jns .Lparanoid_entry_swapgs 1228 ret 1229 1230.Lparanoid_entry_swapgs: 1231 SWAPGS 1232 /* EBX = 0 -> SWAPGS required on exit */ 1233 xorl %ebx, %ebx 1234 ret 1235END(paranoid_entry) 1236 1237/* 1238 * "Paranoid" exit path from exception stack. This is invoked 1239 * only on return from non-NMI IST interrupts that came 1240 * from kernel space. 1241 * 1242 * We may be returning to very strange contexts (e.g. very early 1243 * in syscall entry), so checking for preemption here would 1244 * be complicated. Fortunately, there's no good reason to try 1245 * to handle preemption here. 1246 * 1247 * R/EBX contains the GSBASE related information depending on the 1248 * availability of the FSGSBASE instructions: 1249 * 1250 * FSGSBASE R/EBX 1251 * N 0 -> SWAPGS on exit 1252 * 1 -> no SWAPGS on exit 1253 * 1254 * Y User space GSBASE, must be restored unconditionally 1255 */ 1256ENTRY(paranoid_exit) 1257 UNWIND_HINT_REGS 1258 DISABLE_INTERRUPTS(CLBR_ANY) 1259 TRACE_IRQS_OFF_DEBUG 1260 1261 /* Handle GS depending on FSGSBASE availability */ 1262 ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "nop",X86_FEATURE_FSGSBASE 1263 1264 /* With FSGSBASE enabled, unconditionally restore GSBASE */ 1265 wrgsbase %rbx 1266 jmp .Lparanoid_exit_no_swapgs; 1267 1268.Lparanoid_exit_checkgs: 1269 /* On non-FSGSBASE systems, conditionally do SWAPGS */ 1270 testl %ebx, %ebx 1271 jnz .Lparanoid_exit_no_swapgs 1272 TRACE_IRQS_IRETQ 1273 /* Always restore stashed CR3 value (see paranoid_entry) */ 1274 RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 1275 SWAPGS_UNSAFE_STACK 1276 jmp .Lparanoid_exit_restore 1277 1278.Lparanoid_exit_no_swapgs: 1279 TRACE_IRQS_IRETQ_DEBUG 1280 /* Always restore stashed CR3 value (see paranoid_entry) */ 1281 RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 1282 1283.Lparanoid_exit_restore: 1284 jmp restore_regs_and_return_to_kernel 1285END(paranoid_exit) 1286 1287/* 1288 * Save all registers in pt_regs, and switch GS if needed. 1289 */ 1290ENTRY(error_entry) 1291 UNWIND_HINT_FUNC 1292 cld 1293 PUSH_AND_CLEAR_REGS save_ret=1 1294 ENCODE_FRAME_POINTER 8 1295 testb $3, CS+8(%rsp) 1296 jz .Lerror_kernelspace 1297 1298 /* 1299 * We entered from user mode or we're pretending to have entered 1300 * from user mode due to an IRET fault. 1301 */ 1302 SWAPGS 1303 /* We have user CR3. Change to kernel CR3. */ 1304 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1305 1306.Lerror_entry_from_usermode_after_swapgs: 1307 /* Put us onto the real thread stack. */ 1308 popq %r12 /* save return addr in %12 */ 1309 movq %rsp, %rdi /* arg0 = pt_regs pointer */ 1310 call sync_regs 1311 movq %rax, %rsp /* switch stack */ 1312 ENCODE_FRAME_POINTER 1313 pushq %r12 1314 1315 /* 1316 * We need to tell lockdep that IRQs are off. We can't do this until 1317 * we fix gsbase, and we should do it before enter_from_user_mode 1318 * (which can take locks). 1319 */ 1320 TRACE_IRQS_OFF 1321 CALL_enter_from_user_mode 1322 ret 1323 1324.Lerror_entry_done: 1325 TRACE_IRQS_OFF 1326 ret 1327 1328 /* 1329 * There are two places in the kernel that can potentially fault with 1330 * usergs. Handle them here. B stepping K8s sometimes report a 1331 * truncated RIP for IRET exceptions returning to compat mode. Check 1332 * for these here too. 1333 */ 1334.Lerror_kernelspace: 1335 leaq native_irq_return_iret(%rip), %rcx 1336 cmpq %rcx, RIP+8(%rsp) 1337 je .Lerror_bad_iret 1338 movl %ecx, %eax /* zero extend */ 1339 cmpq %rax, RIP+8(%rsp) 1340 je .Lbstep_iret 1341 cmpq $.Lgs_change, RIP+8(%rsp) 1342 jne .Lerror_entry_done 1343 1344 /* 1345 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up 1346 * gsbase and proceed. We'll fix up the exception and land in 1347 * .Lgs_change's error handler with kernel gsbase. 1348 */ 1349 SWAPGS 1350 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1351 jmp .Lerror_entry_done 1352 1353.Lbstep_iret: 1354 /* Fix truncated RIP */ 1355 movq %rcx, RIP+8(%rsp) 1356 /* fall through */ 1357 1358.Lerror_bad_iret: 1359 /* 1360 * We came from an IRET to user mode, so we have user 1361 * gsbase and CR3. Switch to kernel gsbase and CR3: 1362 */ 1363 SWAPGS 1364 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax 1365 1366 /* 1367 * Pretend that the exception came from user mode: set up pt_regs 1368 * as if we faulted immediately after IRET. 1369 */ 1370 mov %rsp, %rdi 1371 call fixup_bad_iret 1372 mov %rax, %rsp 1373 jmp .Lerror_entry_from_usermode_after_swapgs 1374END(error_entry) 1375 1376ENTRY(error_exit) 1377 UNWIND_HINT_REGS 1378 DISABLE_INTERRUPTS(CLBR_ANY) 1379 TRACE_IRQS_OFF 1380 testb $3, CS(%rsp) 1381 jz retint_kernel 1382 jmp retint_user 1383END(error_exit) 1384 1385/* 1386 * Runs on exception stack. Xen PV does not go through this path at all, 1387 * so we can use real assembly here. 1388 * 1389 * Registers: 1390 * %r14: Used to save/restore the CR3 of the interrupted context 1391 * when PAGE_TABLE_ISOLATION is in use. Do not clobber. 1392 */ 1393ENTRY(nmi) 1394 UNWIND_HINT_IRET_REGS 1395 1396 /* 1397 * We allow breakpoints in NMIs. If a breakpoint occurs, then 1398 * the iretq it performs will take us out of NMI context. 1399 * This means that we can have nested NMIs where the next 1400 * NMI is using the top of the stack of the previous NMI. We 1401 * can't let it execute because the nested NMI will corrupt the 1402 * stack of the previous NMI. NMI handlers are not re-entrant 1403 * anyway. 1404 * 1405 * To handle this case we do the following: 1406 * Check the a special location on the stack that contains 1407 * a variable that is set when NMIs are executing. 1408 * The interrupted task's stack is also checked to see if it 1409 * is an NMI stack. 1410 * If the variable is not set and the stack is not the NMI 1411 * stack then: 1412 * o Set the special variable on the stack 1413 * o Copy the interrupt frame into an "outermost" location on the 1414 * stack 1415 * o Copy the interrupt frame into an "iret" location on the stack 1416 * o Continue processing the NMI 1417 * If the variable is set or the previous stack is the NMI stack: 1418 * o Modify the "iret" location to jump to the repeat_nmi 1419 * o return back to the first NMI 1420 * 1421 * Now on exit of the first NMI, we first clear the stack variable 1422 * The NMI stack will tell any nested NMIs at that point that it is 1423 * nested. Then we pop the stack normally with iret, and if there was 1424 * a nested NMI that updated the copy interrupt stack frame, a 1425 * jump will be made to the repeat_nmi code that will handle the second 1426 * NMI. 1427 * 1428 * However, espfix prevents us from directly returning to userspace 1429 * with a single IRET instruction. Similarly, IRET to user mode 1430 * can fault. We therefore handle NMIs from user space like 1431 * other IST entries. 1432 */ 1433 1434 ASM_CLAC 1435 1436 /* Use %rdx as our temp variable throughout */ 1437 pushq %rdx 1438 1439 testb $3, CS-RIP+8(%rsp) 1440 jz .Lnmi_from_kernel 1441 1442 /* 1443 * NMI from user mode. We need to run on the thread stack, but we 1444 * can't go through the normal entry paths: NMIs are masked, and 1445 * we don't want to enable interrupts, because then we'll end 1446 * up in an awkward situation in which IRQs are on but NMIs 1447 * are off. 1448 * 1449 * We also must not push anything to the stack before switching 1450 * stacks lest we corrupt the "NMI executing" variable. 1451 */ 1452 1453 swapgs 1454 cld 1455 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx 1456 movq %rsp, %rdx 1457 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 1458 UNWIND_HINT_IRET_REGS base=%rdx offset=8 1459 pushq 5*8(%rdx) /* pt_regs->ss */ 1460 pushq 4*8(%rdx) /* pt_regs->rsp */ 1461 pushq 3*8(%rdx) /* pt_regs->flags */ 1462 pushq 2*8(%rdx) /* pt_regs->cs */ 1463 pushq 1*8(%rdx) /* pt_regs->rip */ 1464 UNWIND_HINT_IRET_REGS 1465 pushq $-1 /* pt_regs->orig_ax */ 1466 PUSH_AND_CLEAR_REGS rdx=(%rdx) 1467 ENCODE_FRAME_POINTER 1468 1469 /* 1470 * At this point we no longer need to worry about stack damage 1471 * due to nesting -- we're on the normal thread stack and we're 1472 * done with the NMI stack. 1473 */ 1474 1475 movq %rsp, %rdi 1476 movq $-1, %rsi 1477 call do_nmi 1478 1479 /* 1480 * Return back to user mode. We must *not* do the normal exit 1481 * work, because we don't want to enable interrupts. 1482 */ 1483 jmp swapgs_restore_regs_and_return_to_usermode 1484 1485.Lnmi_from_kernel: 1486 /* 1487 * Here's what our stack frame will look like: 1488 * +---------------------------------------------------------+ 1489 * | original SS | 1490 * | original Return RSP | 1491 * | original RFLAGS | 1492 * | original CS | 1493 * | original RIP | 1494 * +---------------------------------------------------------+ 1495 * | temp storage for rdx | 1496 * +---------------------------------------------------------+ 1497 * | "NMI executing" variable | 1498 * +---------------------------------------------------------+ 1499 * | iret SS } Copied from "outermost" frame | 1500 * | iret Return RSP } on each loop iteration; overwritten | 1501 * | iret RFLAGS } by a nested NMI to force another | 1502 * | iret CS } iteration if needed. | 1503 * | iret RIP } | 1504 * +---------------------------------------------------------+ 1505 * | outermost SS } initialized in first_nmi; | 1506 * | outermost Return RSP } will not be changed before | 1507 * | outermost RFLAGS } NMI processing is done. | 1508 * | outermost CS } Copied to "iret" frame on each | 1509 * | outermost RIP } iteration. | 1510 * +---------------------------------------------------------+ 1511 * | pt_regs | 1512 * +---------------------------------------------------------+ 1513 * 1514 * The "original" frame is used by hardware. Before re-enabling 1515 * NMIs, we need to be done with it, and we need to leave enough 1516 * space for the asm code here. 1517 * 1518 * We return by executing IRET while RSP points to the "iret" frame. 1519 * That will either return for real or it will loop back into NMI 1520 * processing. 1521 * 1522 * The "outermost" frame is copied to the "iret" frame on each 1523 * iteration of the loop, so each iteration starts with the "iret" 1524 * frame pointing to the final return target. 1525 */ 1526 1527 /* 1528 * Determine whether we're a nested NMI. 1529 * 1530 * If we interrupted kernel code between repeat_nmi and 1531 * end_repeat_nmi, then we are a nested NMI. We must not 1532 * modify the "iret" frame because it's being written by 1533 * the outer NMI. That's okay; the outer NMI handler is 1534 * about to about to call do_nmi anyway, so we can just 1535 * resume the outer NMI. 1536 */ 1537 1538 movq $repeat_nmi, %rdx 1539 cmpq 8(%rsp), %rdx 1540 ja 1f 1541 movq $end_repeat_nmi, %rdx 1542 cmpq 8(%rsp), %rdx 1543 ja nested_nmi_out 15441: 1545 1546 /* 1547 * Now check "NMI executing". If it's set, then we're nested. 1548 * This will not detect if we interrupted an outer NMI just 1549 * before IRET. 1550 */ 1551 cmpl $1, -8(%rsp) 1552 je nested_nmi 1553 1554 /* 1555 * Now test if the previous stack was an NMI stack. This covers 1556 * the case where we interrupt an outer NMI after it clears 1557 * "NMI executing" but before IRET. We need to be careful, though: 1558 * there is one case in which RSP could point to the NMI stack 1559 * despite there being no NMI active: naughty userspace controls 1560 * RSP at the very beginning of the SYSCALL targets. We can 1561 * pull a fast one on naughty userspace, though: we program 1562 * SYSCALL to mask DF, so userspace cannot cause DF to be set 1563 * if it controls the kernel's RSP. We set DF before we clear 1564 * "NMI executing". 1565 */ 1566 lea 6*8(%rsp), %rdx 1567 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ 1568 cmpq %rdx, 4*8(%rsp) 1569 /* If the stack pointer is above the NMI stack, this is a normal NMI */ 1570 ja first_nmi 1571 1572 subq $EXCEPTION_STKSZ, %rdx 1573 cmpq %rdx, 4*8(%rsp) 1574 /* If it is below the NMI stack, it is a normal NMI */ 1575 jb first_nmi 1576 1577 /* Ah, it is within the NMI stack. */ 1578 1579 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) 1580 jz first_nmi /* RSP was user controlled. */ 1581 1582 /* This is a nested NMI. */ 1583 1584nested_nmi: 1585 /* 1586 * Modify the "iret" frame to point to repeat_nmi, forcing another 1587 * iteration of NMI handling. 1588 */ 1589 subq $8, %rsp 1590 leaq -10*8(%rsp), %rdx 1591 pushq $__KERNEL_DS 1592 pushq %rdx 1593 pushfq 1594 pushq $__KERNEL_CS 1595 pushq $repeat_nmi 1596 1597 /* Put stack back */ 1598 addq $(6*8), %rsp 1599 1600nested_nmi_out: 1601 popq %rdx 1602 1603 /* We are returning to kernel mode, so this cannot result in a fault. */ 1604 iretq 1605 1606first_nmi: 1607 /* Restore rdx. */ 1608 movq (%rsp), %rdx 1609 1610 /* Make room for "NMI executing". */ 1611 pushq $0 1612 1613 /* Leave room for the "iret" frame */ 1614 subq $(5*8), %rsp 1615 1616 /* Copy the "original" frame to the "outermost" frame */ 1617 .rept 5 1618 pushq 11*8(%rsp) 1619 .endr 1620 UNWIND_HINT_IRET_REGS 1621 1622 /* Everything up to here is safe from nested NMIs */ 1623 1624#ifdef CONFIG_DEBUG_ENTRY 1625 /* 1626 * For ease of testing, unmask NMIs right away. Disabled by 1627 * default because IRET is very expensive. 1628 */ 1629 pushq $0 /* SS */ 1630 pushq %rsp /* RSP (minus 8 because of the previous push) */ 1631 addq $8, (%rsp) /* Fix up RSP */ 1632 pushfq /* RFLAGS */ 1633 pushq $__KERNEL_CS /* CS */ 1634 pushq $1f /* RIP */ 1635 iretq /* continues at repeat_nmi below */ 1636 UNWIND_HINT_IRET_REGS 16371: 1638#endif 1639 1640repeat_nmi: 1641 /* 1642 * If there was a nested NMI, the first NMI's iret will return 1643 * here. But NMIs are still enabled and we can take another 1644 * nested NMI. The nested NMI checks the interrupted RIP to see 1645 * if it is between repeat_nmi and end_repeat_nmi, and if so 1646 * it will just return, as we are about to repeat an NMI anyway. 1647 * This makes it safe to copy to the stack frame that a nested 1648 * NMI will update. 1649 * 1650 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if 1651 * we're repeating an NMI, gsbase has the same value that it had on 1652 * the first iteration. paranoid_entry will load the kernel 1653 * gsbase if needed before we call do_nmi. "NMI executing" 1654 * is zero. 1655 */ 1656 movq $1, 10*8(%rsp) /* Set "NMI executing". */ 1657 1658 /* 1659 * Copy the "outermost" frame to the "iret" frame. NMIs that nest 1660 * here must not modify the "iret" frame while we're writing to 1661 * it or it will end up containing garbage. 1662 */ 1663 addq $(10*8), %rsp 1664 .rept 5 1665 pushq -6*8(%rsp) 1666 .endr 1667 subq $(5*8), %rsp 1668end_repeat_nmi: 1669 1670 /* 1671 * Everything below this point can be preempted by a nested NMI. 1672 * If this happens, then the inner NMI will change the "iret" 1673 * frame to point back to repeat_nmi. 1674 */ 1675 pushq $-1 /* ORIG_RAX: no syscall to restart */ 1676 1677 /* 1678 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit 1679 * as we should not be calling schedule in NMI context. 1680 * Even with normal interrupts enabled. An NMI should not be 1681 * setting NEED_RESCHED or anything that normal interrupts and 1682 * exceptions might do. 1683 */ 1684 call paranoid_entry 1685 UNWIND_HINT_REGS 1686 1687 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ 1688 movq %rsp, %rdi 1689 movq $-1, %rsi 1690 call do_nmi 1691 1692 /* Always restore stashed CR3 value (see paranoid_entry) */ 1693 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 1694 1695 /* 1696 * The above invocation of paranoid_entry stored the GSBASE 1697 * related information in R/EBX depending on the availability 1698 * of FSGSBASE. 1699 * 1700 * If FSGSBASE is enabled, restore the saved GSBASE value 1701 * unconditionally, otherwise take the conditional SWAPGS path. 1702 */ 1703 ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE 1704 1705 wrgsbase %rbx 1706 jmp nmi_restore 1707 1708nmi_no_fsgsbase: 1709 /* EBX == 0 -> invoke SWAPGS */ 1710 testl %ebx, %ebx 1711 jnz nmi_restore 1712 1713nmi_swapgs: 1714 SWAPGS_UNSAFE_STACK 1715 1716nmi_restore: 1717 POP_REGS 1718 1719 /* 1720 * Skip orig_ax and the "outermost" frame to point RSP at the "iret" 1721 * at the "iret" frame. 1722 */ 1723 addq $6*8, %rsp 1724 1725 /* 1726 * Clear "NMI executing". Set DF first so that we can easily 1727 * distinguish the remaining code between here and IRET from 1728 * the SYSCALL entry and exit paths. 1729 * 1730 * We arguably should just inspect RIP instead, but I (Andy) wrote 1731 * this code when I had the misapprehension that Xen PV supported 1732 * NMIs, and Xen PV would break that approach. 1733 */ 1734 std 1735 movq $0, 5*8(%rsp) /* clear "NMI executing" */ 1736 1737 /* 1738 * iretq reads the "iret" frame and exits the NMI stack in a 1739 * single instruction. We are returning to kernel mode, so this 1740 * cannot result in a fault. Similarly, we don't need to worry 1741 * about espfix64 on the way back to kernel mode. 1742 */ 1743 iretq 1744END(nmi) 1745 1746ENTRY(ignore_sysret) 1747 UNWIND_HINT_EMPTY 1748 mov $-ENOSYS, %eax 1749 sysret 1750END(ignore_sysret) 1751 1752ENTRY(rewind_stack_do_exit) 1753 UNWIND_HINT_FUNC 1754 /* Prevent any naive code from trying to unwind to our caller. */ 1755 xorl %ebp, %ebp 1756 1757 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax 1758 leaq -PTREGS_SIZE(%rax), %rsp 1759 UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE 1760 1761 call do_exit 1762END(rewind_stack_do_exit) 1763