Lines Matching +full:ecx +full:- +full:2000

1 /* SPDX-License-Identifier: GPL-2.0 */
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
9 * entry.S contains the system-call and fault low-level handling routines.
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
26 #include <asm/asm-offsets.h>
40 #include <asm/nospec-branch.h>
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
52 * This is the only entry point used for 64-bit system calls. The
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
71 * r11 saved rflags (note: r11 is callee-clobbered register in C ABI)
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
82 * When the user can change pt_regs->foo, always force IRET. That is because
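The register convention spelled out above (syscall number and return value in rax, arguments in rdi/rsi/rdx/r10/r8/r9, rcx and r11 overwritten by the CPU with the saved RIP and RFLAGS) is visible from userspace as well. A minimal sketch in C for x86-64 Linux; the raw_syscall3() helper is illustrative, not a kernel or libc API:

    /* Illustrative raw 64-bit SYSCALL wrapper: rax carries the number
     * and the return value; rcx and r11 are listed as clobbers because
     * the CPU replaces them with the saved RIP and RFLAGS, exactly as
     * the entry comments describe. */
    static long raw_syscall3(long nr, long a1, long a2, long a3)
    {
            long ret;

            __asm__ volatile ("syscall"
                              : "=a" (ret)
                              : "a" (nr), "D" (a1), "S" (a2), "d" (a3)
                              : "rcx", "r11", "memory");
            return ret;
    }

    int main(void)
    {
            static const char msg[] = "hello via SYSCALL\n";

            raw_syscall3(1, 1, (long)msg, sizeof(msg) - 1);  /* __NR_write == 1 */
            return 0;
    }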
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
107 pushq %rax /* pt_regs->orig_ax */
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
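The pushes above build the fixed tail of the pt_regs frame from SS down to orig_ax, and PUSH_AND_CLEAR_REGS then pushes the general-purpose registers below it. A hedged C sketch of the resulting layout (field order mirrors the x86-64 pt_regs, lowest address first, but this is a simplified illustration rather than the kernel header):

    struct pt_regs_sketch {
            /* pushed last by PUSH_AND_CLEAR_REGS, so at the lowest addresses */
            unsigned long r15, r14, r13, r12, bp, bx;
            unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
            /* syscall number, hardware error code, or -1 */
            unsigned long orig_ax;
            /* hardware-style iret frame, pushed first (highest addresses) */
            unsigned long ip, cs, flags, sp, ss;
    };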
125 * a completely clean 64-bit userspace context. If we're not,
149 pushq RSP-RDI(%rdi) /* RSP */
180 * Save callee-saved registers
208 /* restore callee-saved registers */
249 * -- at this point the register set should be a valid user set
285 * idtentry_body - Macro to emit code calling the C function
309 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
320 * idtentry - Macro to generate entry stubs for simple IDT entries
344 pushq $-1 /* ORIG_RAX: no syscall to restart */
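The stubs generated by idtentry and idtentry_body end up calling C handlers. As a rough illustration, assuming the DEFINE_IDTENTRY() naming convention, a handler takes pt_regs plus a hardware error code only for vectors that push one (otherwise ORIG_RAX is forced to -1, as above):

    struct pt_regs;

    /* vector without a hardware error code */
    void exc_divide_error(struct pt_regs *regs);

    /* vector where the CPU pushes an error code */
    void exc_general_protection(struct pt_regs *regs, unsigned long error_code);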
349 * If coming from kernel space, create a 6-word gap to allow the
352 testb $3, CS-ORIG_RAX(%rsp)
382 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
404 pushq $-1 /* ORIG_RAX: no syscall to restart */
410 testb $3, CS-ORIG_RAX(%rsp)
434 * idtentry_vc - Macro to generate entry stub for #VC
443 * an IST stack by switching to the task stack if coming from user-space (which
445 * entered from kernel-mode.
447 * If entered from kernel-mode, the return stack is validated first, and if it is
449 * will switch to a fall-back stack (VC2) and call a special handler function.
465 testb $3, CS-ORIG_RAX(%rsp)
470 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
479 * stack if it is safe to do so. If not, it switches to the VC fall-back
491 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
499 * identical to the stack in the IRET frame or the VC fall-back stack,
531 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
628 addq $8, %rsp /* skip regs->orig_ax */
637 .long .Lnative_iret - (. + 4)
644 * 64-bit mode SS:RSP on the exception stack is always valid.
647 testb $4, (SS-RIP)(%rsp)
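The testb $4 above inspects bit 2 of the saved SS selector, the table-indicator bit; when it is set the selector references the LDT, which is exactly the case the espfix64 machinery exists for. A small C sketch of the standard x86 selector fields (helper names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* x86 segment selector: | index (15:3) | TI (2) | RPL (1:0) | */
    static bool selector_uses_ldt(uint16_t sel)
    {
            return sel & 0x4;       /* TI bit: 1 = LDT, 0 = GDT */
    }

    static unsigned int selector_rpl(uint16_t sel)
    {
            return sel & 0x3;       /* requested privilege level */
    }

    static unsigned int selector_index(uint16_t sel)
    {
            return sel >> 3;        /* descriptor table index */
    }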
654 * This may fault. Non-paranoid faults on return to userspace are
656 * Double-faults due to espfix64 are handled in exc_double_fault.
674 * --- top of ESPFIX stack ---
679 * RIP <-- RSP points here when we're done
680 * RAX <-- espfix_waddr points here
681 * --- bottom of ESPFIX stack ---
706 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
731 * is read-only and RSP[31:16] are preloaded with the userspace
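The two comments above describe the espfix64 aliasing trick: espfix_stack has bits 31:16 clear, and the page tables provide a read-only alias of the copied iret frame for every possible value of those bits. A hedged C sketch of the address computation (variable names are illustrative):

    #include <stdint.h>

    /*
     * OR-ing bits 31:16 of the user RSP into espfix_stack selects one of
     * 65536 read-only aliases of the same page.  An IRET through a 16-bit
     * SS restores only the low 16 bits of ESP, so the bits that remain
     * visible afterwards are just the userspace RSP bits the task already
     * knows, not kernel stack bits.
     */
    static uint64_t espfix_alias_rsp(uint64_t espfix_stack, uint64_t user_rsp)
    {
            return espfix_stack | (user_rsp & 0xffff0000UL);
    }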
786 * existing activation in its critical region -- if so, we pop the current
824 movl %ds, %ecx
827 movl %es, %ecx
830 movl %fs, %ecx
833 movl %gs, %ecx
848 pushq $-1 /* orig_ax = -1 => not a system call */
860 * N 0 -> SWAPGS on exit
861 * 1 -> no SWAPGS on exit
865 * R14 - old CR3
866 * R15 - old SPEC_CTRL
914 /* EBX = 1 -> kernel GSBASE active, no restore required */
918 * The kernel-enforced convention is that a negative GSBASE indicates
921 movl $MSR_GS_BASE, %ecx
926 /* EBX = 0 -> SWAPGS required on exit */
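The MSR_GS_BASE check relies on the kernel-enforced convention mentioned above: kernel GSBASE values are upper-half canonical addresses, so they look negative when treated as signed. A minimal C sketch of that sign test (helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Negative (upper canonical half) GSBASE means the kernel GS is
     * already active and no SWAPGS is required on this path. */
    static bool gsbase_is_kernel(uint64_t gsbase)
    {
            return (int64_t)gsbase < 0;
    }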
945 * only on return from non-NMI IST interrupts that came
957 * N 0 -> SWAPGS on exit
958 * 1 -> no SWAPGS on exit
962 * R14 - old CR3
963 * R15 - old SPEC_CTRL
970 * to the per-CPU x86_spec_ctrl_shadow variable.
992 /* On non-FSGSBASE systems, conditionally do SWAPGS */
1039 movl %ecx, %eax /* zero extend */
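The "zero extend" note works because any write to a 32-bit register on x86-64 clears bits 63:32 of the full 64-bit register, so a plain movl is sufficient. A small userspace demo of that architectural rule (not taken from the kernel):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t in = UINT64_C(0xdeadbeefcafebabe);
            uint64_t out;

            /* movl copies only the low 32 bits, but the CPU clears the
             * upper 32 bits of the destination register. */
            __asm__ ("movl %k1, %k0" : "=r" (out) : "r" (in));

            printf("%#" PRIx64 "\n", out);  /* prints 0xcafebabe */
            return 0;
    }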
1115 * stack of the previous NMI. NMI handlers are not re-entrant
1153 testb $3, CS-RIP+8(%rsp)
1173 pushq 5*8(%rdx) /* pt_regs->ss */
1174 pushq 4*8(%rdx) /* pt_regs->rsp */
1175 pushq 3*8(%rdx) /* pt_regs->flags */
1176 pushq 2*8(%rdx) /* pt_regs->cs */
1177 pushq 1*8(%rdx) /* pt_regs->rip */
1179 pushq $-1 /* pt_regs->orig_ax */
1188 * due to nesting -- we're on the normal thread stack and we're
1204-1228 * +---------------------------------------------------------+  (horizontal borders of the NMI stack-layout diagram)
1230 * The "original" frame is used by hardware. Before re-enabling
1267 cmpl $1, -8(%rsp)
1306 leaq -10*8(%rsp), %rdx
1382 pushq -6*8(%rsp)
1393 pushq $-1 /* ORIG_RAX: no syscall to restart */
1427 /* EBX == 0 -> invoke SWAPGS */
1471 * This handles SYSCALL from 32-bit code. There is no way to program
1472 * MSRs to fully disable 32-bit SYSCALL.
1477 mov $-ENOSYS, %eax
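Both this path and the rax=$-ENOSYS default at syscall entry follow the usual raw-syscall return convention: values in the range -4095..-1 encode a negated errno, and -ENOSYS means the call is not available. A hedged sketch of how a userspace wrapper typically decodes that (function name is illustrative):

    #include <errno.h>

    /* Values in [-4095, -1] are negated errno codes; everything else is
     * a successful return value. */
    static long decode_syscall_ret(long ret)
    {
            if ((unsigned long)ret > -4096UL) {
                    errno = (int)-ret;      /* e.g. ENOSYS */
                    return -1;
            }
            return ret;
    }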
1490 leaq -PTREGS_SIZE(%rax), %rsp
1503 * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
1523 * This means that the stack is non-constant and ORC can't unwind it with %rsp
1536 movl $5, %ecx
1545 .skip 32 - (.Lret1 - 1f), 0xcc
1553 * This should ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
1556 .skip 32 - 18, 0xcc
1562 sub $1, %ecx