/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm

/*
 * When the dynamic function tracer is enabled, it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints.  During this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettime() fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
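
/*
 * Purely for illustration (not part of the kernel): a C library's
 * write(1, buf, 14) would typically reach this entry point as
 *
 *	movq	$__NR_write, %rax	# system call number (1)
 *	movq	$1, %rdi		# arg0: fd
 *	leaq	buf(%rip), %rsi		# arg1: buf
 *	movq	$14, %rdx		# arg2: count
 *	syscall
 *
 * On return, %rax holds the result (or a negative errno), and %rcx/%r11
 * have been clobbered by the SYSCALL/SYSRET machinery described above.
 */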

	.pushsection .entry_trampoline, "ax"

/*
 * The code in here gets remapped into cpu_entry_area's trampoline.  This means
 * that the assembler and linker have the wrong idea as to where this code
 * lives (and, in fact, it's mapped more than once, so it's not even at a
 * fixed address).  So we can't reference any symbols outside the entry
 * trampoline and expect it to work.
 *
 * Instead, we carefully abuse %rip-relative addressing.
 * _entry_trampoline(%rip) refers to the start of the remapped entry
 * trampoline.  We can thus find cpu_entry_area with this macro:
 */

#define CPU_ENTRY_AREA \
	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)

/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
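
/*
 * For illustration (this describes the asm-offsets constants as we
 * understand them, not extra functionality): CPU_ENTRY_AREA_entry_trampoline
 * is the offset of the trampoline within struct cpu_entry_area, so the
 * expression above computes
 *
 *	&cpu_entry_area == &_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline
 *
 * using only a %rip-relative reference to _entry_trampoline, which is
 * correct no matter which alias of the trampoline we are executing from.
 * RSP_SCRATCH then addresses the last word of the entry stack inside
 * that same cpu_entry_area.
 */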

ENTRY(entry_SYSCALL_64_trampoline)
	UNWIND_HINT_EMPTY
	swapgs

	/* Stash the user RSP. */
	movq	%rsp, RSP_SCRATCH

	/* Note: using %rsp as a scratch reg. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Load the top of the task stack into RSP */
	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp

	/* Start building the simulated IRET frame. */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	RSP_SCRATCH			/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */

	/*
	 * x86 lacks a near absolute jump, and we can't jump to the real
	 * entry text with a relative jump.  We could push the target
	 * address and then use retq, but this destroys the pipeline on
	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
	 * spill RDI and restore it in a second-stage trampoline.
	 */
	pushq	%rdi
	movq	$entry_SYSCALL_64_stage2, %rdi
	JMP_NOSPEC %rdi
END(entry_SYSCALL_64_trampoline)

	.popsection

ENTRY(entry_SYSCALL_64_stage2)
	UNWIND_HINT_EMPTY
	popq	%rdi
	jmp	entry_SYSCALL_64_after_hwframe
END(entry_SYSCALL_64_stage2)

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/*
	 * This path is only taken when PAGE_TABLE_ISOLATION is disabled
	 * (with PTI enabled, the trampoline above already switched CR3),
	 * so it is not required to switch CR3 here.
	 */
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->rbx */
	pushq	%rbp				/* pt_regs->rbp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	TRACE_IRQS_OFF

	/* IRQs are off. */
	movq	%rsp, %rdi
	call	do_syscall_64		/* returns with IRQs disabled */

	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11			/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode
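
	/*
	 * A worked example (assuming 4-level paging, where
	 * __VIRTUAL_MASK_SHIFT == 47 and both shifts are by 16):
	 * a canonical RIP such as 0x00007ffffffff000 is unchanged by
	 * the shl/sar pair, so the compare above succeeds.  A
	 * non-canonical value such as 0x0000800000000000 becomes
	 * 0xffff800000000000 after sign-extension from bit 47, the
	 * compare fails, and we take the safe IRET path instead.
	 */
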
	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 * stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_EXTRA_REGS
	popq	%rsi	/* skip r11 */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rsi	/* skip rcx */
	popq	%rdx
	popq	%rsi

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	/* Clobbers %rbx */
	FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	CALL_NOSPEC %rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)
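
/*
 * A worked example of the encoding above (illustrative only): for
 * vector 32, ~vector + 0x80 == -33 + 128 == 95.  Across vectors
 * 32..255 the pushed constant stays within [-128, 95], so each stub's
 * pushq assembles as a short sign-extended-byte push and the stub fits
 * its 8-byte slot.  common_interrupt then undoes the bias with
 * "addq $-0x80", leaving ~vector on the stack for do_IRQ.
 */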

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	movq	%rsp, \old_rsp

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm
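
/*
 * To illustrate the irq_count protocol (a sketch, not extra code):
 * irq_count idles at -1.  The outermost ENTER_IRQ_STACK increments it
 * to 0 and the "jnz" falls through, so we switch RSP to the IRQ stack;
 * a nested entry (e.g. an interrupt taken while already on the IRQ
 * stack) increments it to 1, the "jnz" is taken, and we only push
 * \old_rsp.  Each LEAVE_IRQ_STACK undoes one level, and the final
 * decrement returns irq_count to -1.
 */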

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld

	testb	$3, CS-ORIG_RAX(%rsp)
	jz	1f
	SWAPGS
	call	switch_to_thread_stack
1:

	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func	/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
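	/*
	 * A worked example with made-up values: if the user RSP was
	 * 0x12345678, RAX now holds 0x12340000, and the orq below
	 * produces (espfix_stack | 0x12340000).  An IRET to a 16-bit
	 * stack segment only replaces the low 16 bits of RSP, so the
	 * bits the user can observe in RSP[31:16] afterwards are its
	 * own 0x1234 rather than kernel stack bits.
	 */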
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)

/*
 * Switch to the thread stack.  This is called with the IRET frame and
 * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
 * space has not been allocated for them.)
 */
ENTRY(switch_to_thread_stack)
	UNWIND_HINT_FUNC

	pushq	%rdi
	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
	ret
END(switch_to_thread_stack)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid < 2
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \paranoid
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid < 2
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm
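
/*
 * For illustration only, a sketch of one expansion:
 * "idtentry overflow do_overflow has_error_code=0" defines ENTRY(overflow),
 * which pushes $-1 into the slot where hardware would have pushed an error
 * code (doubling as ORIG_RAX: "no syscall to restart"), allocates pt_regs,
 * calls error_entry, and then calls do_overflow with %rdi = pt_regs and
 * %rsi = 0 before jumping to error_exit.
 */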

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi:  new selector
	 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	TRACE_IRQS_OFF
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	TRACE_IRQS_FLAGS (%rsp)
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
idtentry xenint3		do_int3			has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		do_mce			has_error_code=0	paranoid=1
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx

1:
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	ret
END(paranoid_entry)
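
/*
 * A note on the rdmsr test above (illustrative, with made-up addresses):
 * rdmsr returns MSR_GS_BASE split across %edx:%eax.  A kernel GSBASE
 * points at the per-CPU area in the upper half of the address space
 * (e.g. 0xffff880000000000), so %edx has its sign bit set and "js"
 * branches.  A user GSBASE lives in the lower canonical half, %edx is
 * non-negative, and we SWAPGS and clear %ebx to request swapgs on exit.
 */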

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
	SWAPGS_UNSAFE_STACK
	jmp	.Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
.Lparanoid_exit_restore:
	jmp	restore_regs_and_return_to_kernel
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs.  Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
	jmp .Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)


/*
 * On entry, EBX is a "return to kernel mode" flag:
 *   1: already in kernel mode, don't need SWAPGS
 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
 */
ENTRY(error_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	testl	%ebx, %ebx
	jnz	retint_kernel
	jmp	retint_user
END(error_exit)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
ENTRY(nmi)
	UNWIND_HINT_IRET_REGS

	/*
	 * We allow breakpoints in NMIs.  If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI.  We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI.  NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *   Check a special location on the stack that contains
	 *   a variable that is set when NMIs are executing.
	 *   The interrupted task's stack is also checked to see if it
	 *   is an NMI stack.
	 *   If the variable is not set and the stack is not the NMI
	 *   stack then:
	 *     o Set the special variable on the stack
	 *     o Copy the interrupt frame into an "outermost" location on the
	 *       stack
	 *     o Copy the interrupt frame into an "iret" location on the stack
	 *     o Continue processing the NMI
	 *   If the variable is set or the previous stack is the NMI stack:
	 *     o Modify the "iret" location to jump to the repeat_nmi
	 *     o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested.  Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction.  Similarly, IRET to user mode
	 * can fault.  We therefore handle NMIs from user space like
	 * other IST entries.
	 */

	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel

	/*
	 * NMI from user mode.  We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */

	swapgs
	cld
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
	pushq	3*8(%rdx)	/* pt_regs->flags */
	pushq	2*8(%rdx)	/* pt_regs->cs */
	pushq	1*8(%rdx)	/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq	$-1		/* pt_regs->orig_ax */
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	pushq	(%rdx)		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	%rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS
	ENCODE_FRAME_POINTER

	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	/*
	 * Return back to user mode.  We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode

.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                             |
	 * | original Return RSP                                     |
	 * | original RFLAGS                                         |
	 * | original CS                                             |
	 * | original RIP                                            |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                    |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame        |
	 * | iret Return RSP  } on each loop iteration; overwritten  |
	 * | iret RFLAGS      } by a nested NMI to force another     |
	 * | iret CS          } iteration if needed.                 |
	 * | iret RIP         }                                      |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;       |
	 * | outermost Return RSP  } will not be changed before      |
	 * | outermost RFLAGS      } NMI processing is done.         |
	 * | outermost CS          } Copied to "iret" frame on each  |
	 * | outermost RIP         } iteration.                      |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                 |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware.  Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */

	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI.  We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI.  That's okay; the outer NMI handler is
	 * about to call do_nmi anyway, so we can just resume
	 * the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing".  If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.  This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET.  We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets.  We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP.  We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */

	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away.  Disabled by
	 * default because IRET is very expensive.
	 */
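	/*
	 * A worked sketch of the frame built below: if RSP == X before
	 * the "pushq $0", then after "pushq %rsp" the saved value is
	 * X - 8, and the "addq $8, (%rsp)" corrects it to X.  The iretq
	 * therefore resumes at label 1 with RSP restored to exactly X,
	 * having unmasked NMIs as a side effect.
	 */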
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here.  But NMIs are still enabled and we can take another
	 * nested NMI.  The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration.  paranoid_entry will load the kernel
	 * gsbase if needed before we call do_nmi.  "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */

	/*
	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	ALLOC_PT_GPREGS_ON_STACK

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled.  An NMI should not be setting
	 * NEED_RESCHED or anything that normal interrupts and exceptions
	 * might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	nmi_restore
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	POP_EXTRA_REGS
	POP_C_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the
	 * "iret" frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing".  Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction.  We are returning to kernel mode, so this
	 * cannot result in a fault.  Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
END(nmi)

ENTRY(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysret
END(ignore_sysret)

ENTRY(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE

	call	do_exit
END(rewind_stack_do_exit)