Lines Matching +full:4 +full:- +full:switch

1 /* SPDX-License-Identifier: GPL-2.0 */
5 * entry_32.S contains the system-call and low-level fault and trap handling routines.
13 * 0(%esp) - %ebx
14 * 4(%esp) - %ecx
15 * 8(%esp) - %edx
16 * C(%esp) - %esi
17 * 10(%esp) - %edi
18 * 14(%esp) - %ebp
19 * 18(%esp) - %eax
20 * 1C(%esp) - %ds
21 * 20(%esp) - %es
22 * 24(%esp) - %fs
23 * 28(%esp) - unused -- was %gs on old stackprotector kernels
24 * 2C(%esp) - orig_eax
25 * 30(%esp) - %eip
26 * 34(%esp) - %cs
27 * 38(%esp) - %eflags
28 * 3C(%esp) - %oldesp
29 * 40(%esp) - %oldss
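The offsets above are just the 32-bit struct pt_regs as it sits on the kernel stack. A minimal C sketch of that layout (field names follow the kernel's 32-bit ptrace.h; the struct name and comments are illustrative, and the offsets only hold for a 32-bit build):

	struct pt_regs_sketch {
		unsigned long bx;		/*  0(%esp) */
		unsigned long cx;		/*  4(%esp) */
		unsigned long dx;		/*  8(%esp) */
		unsigned long si;		/*  C(%esp) */
		unsigned long di;		/* 10(%esp) */
		unsigned long bp;		/* 14(%esp) */
		unsigned long ax;		/* 18(%esp) */
		unsigned short ds, __dsh;	/* 1C(%esp) */
		unsigned short es, __esh;	/* 20(%esp) */
		unsigned short fs, __fsh;	/* 24(%esp) */
		unsigned short gs, __gsh;	/* 28(%esp), unused slot */
		unsigned long orig_ax;		/* 2C(%esp) */
		unsigned long ip;		/* 30(%esp) */
		unsigned short cs, __csh;	/* 34(%esp) */
		unsigned long flags;		/* 38(%esp) */
		unsigned long sp;		/* 3C(%esp), oldesp */
		unsigned short ss, __ssh;	/* 40(%esp), oldss */
	};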
40 #include <asm/processor-flags.h>
48 #include <asm/nospec-branch.h>
56 /* Unconditionally switch to user cr3 */
74 /* On user-cr3? */
78 /* From userspace with kernel cr3 - BUG */
85 * Switch to kernel cr3 if not already loaded and return current cr3 in
111 andl $0x0000ffff, 4*4(%esp)
114 testl $X86_EFLAGS_VM, 5*4(%esp)
117 testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
120 orl $CS_FROM_KERNEL, 4*4(%esp)
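The andl/orl pair above works because only the low 16 bits of the pushed CS slot are architecturally defined; the entry code clears the upper bits and reuses them as software markers. A hedged C sketch of the idea (the helper name is hypothetical; the bit positions follow the CS_FROM_* defines in this file, with bit 31 being the entry-stack marker mentioned further down):

	#define CS_FROM_ENTRY_STACK	(1U << 31)	/* frame was copied from the entry stack */
	#define CS_FROM_USER_CR3	(1U << 30)	/* user cr3 has to be restored on exit */
	#define CS_FROM_KERNEL		(1U << 29)	/* frame describes a kernel-mode context */

	static unsigned int mark_kernel_cs(unsigned int saved_cs)
	{
		saved_cs &= 0x0000ffff;		/* only the selector bits are real */
		saved_cs |= CS_FROM_KERNEL;	/* skipped for VM86 and user-RPL frames */
		return saved_cs;
	}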
125 * 6*4(%esp) - <previous context>
126 * 5*4(%esp) - flags
127 * 4*4(%esp) - cs
128 * 3*4(%esp) - ip
129 * 2*4(%esp) - orig_eax
130 * 1*4(%esp) - gs / function
131 * 0*4(%esp) - fs
134 * is complete and in particular regs->sp is correct. This gives us
137 * 14*4(%esp) - <previous context>
138 * 13*4(%esp) - gap / flags
139 * 12*4(%esp) - gap / cs
140 * 11*4(%esp) - gap / ip
141 * 10*4(%esp) - gap / orig_eax
142 * 9*4(%esp) - gap / gs / function
143 * 8*4(%esp) - gap / fs
144 * 7*4(%esp) - ss
145 * 6*4(%esp) - sp
146 * 5*4(%esp) - flags
147 * 4*4(%esp) - cs
148 * 3*4(%esp) - ip
149 * 2*4(%esp) - orig_eax
150 * 1*4(%esp) - gs / function
151 * 0*4(%esp) - fs
156 addl $7*4, (%esp) # point sp back at the previous context
157 pushl 7*4(%esp) # flags
158 pushl 7*4(%esp) # cs
159 pushl 7*4(%esp) # ip
160 pushl 7*4(%esp) # orig_eax
161 pushl 7*4(%esp) # gs / function
162 pushl 7*4(%esp) # fs
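Every pushl 7*4(%esp) above uses the same source offset yet copies a different slot: the source address is computed with the pre-push %esp, and each push then lowers %esp by one word, so a constant "7 words up" walks down the original frame from flags to fs. A small user-space model of the trick (purely illustrative):

	#include <assert.h>

	int main(void)
	{
		unsigned long stack[32];
		unsigned long *sp = &stack[32 - 8];

		/* frame before the copies, low to high:
		 * sp, ss, fs, gs, orig_eax, ip, cs, flags -- tag each slot with its index */
		for (int i = 0; i < 8; i++)
			sp[i] = i;

		/* the six "pushl 7*4(%esp)" copies */
		for (int i = 0; i < 6; i++) {
			unsigned long val = sp[7];	/* source read at a fixed offset... */
			sp--;				/* ...then the push lowers the stack pointer */
			sp[0] = val;
		}

		/* result: fs..flags duplicated, in their original order, below sp/ss */
		for (int i = 0; i < 6; i++)
			assert(sp[i] == (unsigned long)(i + 2));
		return 0;
	}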
174 testl $CS_FROM_KERNEL, 1*4(%esp)
179 * regs->sp without lowering %esp in between, such that an NMI in the
184 movl 5*4(%esp), %eax # (modified) regs->sp
186 movl 4*4(%esp), %ecx # flags
187 movl %ecx, %ss:-1*4(%eax)
189 movl 3*4(%esp), %ecx # cs
191 movl %ecx, %ss:-2*4(%eax)
193 movl 2*4(%esp), %ecx # ip
194 movl %ecx, %ss:-3*4(%eax)
196 movl 1*4(%esp), %ecx # eax
197 movl %ecx, %ss:-4*4(%eax)
200 lea -4*4(%eax), %esp
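The stores above build the new four-word return frame just below the (possibly modified) regs->sp, and only the final lea moves %esp there, so an NMI arriving mid-sequence never runs on a half-built frame. Roughly, in C (hypothetical helper, sketch only):

	static unsigned long *build_return_frame(unsigned long *target_sp,
						 unsigned long flags, unsigned long cs,
						 unsigned long ip, unsigned long ax)
	{
		target_sp[-1] = flags;		/* movl %ecx, %ss:-1*4(%eax) */
		target_sp[-2] = cs;
		target_sp[-3] = ip;
		target_sp[-4] = ax;
		return target_sp - 4;		/* lea -4*4(%eax), %esp */
	}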
233 /* Switch to kernel stack if necessary */
245 * Now switch the CR3 when PTI is enabled.
271 4: addl $(4 + \pop), %esp /* pop the unused "gs" slot */
276 * ASM the registers are known and we can trivially hard-code them.
280 _ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS)
285 * Now switch the CR3 when PTI is enabled.
287 * We enter with kernel cr3 and switch the cr3 to the value
295 /* User cr3 in \cr3_reg - write it to hardware cr3 */
322 jne .Lend_\@ # returning to user-space with LDT SS
325 * Setup and switch to ESPFIX stack
329 * "official" bug of all the x86-compatible CPUs, which we can work
340 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
350 lss (%esp), %esp /* switch to espfix segment */
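The byte store into GDT_ESPFIX_SS + 4 (and its companion at offset 7) rewrites only the high half of the segment base: an 8-byte GDT descriptor keeps base bits 16..23 in byte 4 and bits 24..31 in byte 7, and the espfix offset is constructed so its low 16 bits are zero. A minimal sketch of that patching, treating the descriptor as a flat byte array (hypothetical helper):

	#include <stdint.h>

	static void set_espfix_base_high(uint8_t desc[8], uint32_t base)
	{
		desc[4] = (base >> 16) & 0xff;	/* GDT_ESPFIX_SS + 4: base bits 16..23 */
		desc[7] = (base >> 24) & 0xff;	/* GDT_ESPFIX_SS + 7: base bits 24..31 */
	}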
359 * We need to be very careful here with the %esp switch, because an NMI
361 * entry-stack, it will overwrite the task-stack and everything we
362 * copied there. So allocate the stack-frame on the task-stack and
363 * switch to it before we do any copying.
380 subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
392 /* Load top of task-stack into %edi */
395 /* Special case - entry from kernel mode via entry stack */
415 * Stack-frame contains 4 additional segment registers when
418 addl $(4 * 4), %ecx
423 /* Allocate frame on task-stack */
426 /* Switch to task-stack */
430 * We are now on the task-stack and can safely copy over the
431 * stack-frame
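Taken together, these fragments amount to: work out how many bytes to move (a pt_regs frame, plus four extra segment slots when the interrupted context was in VM86 mode), carve that much space off the top of the task stack, switch %esp to it, and copy the frame over. A rough C model (function and parameter names are assumptions for illustration):

	#include <string.h>

	static void *switch_to_task_stack(void *entry_sp, void *task_stack_top,
					  unsigned long ptregs_size, int from_vm86)
	{
		unsigned long bytes = ptregs_size;

		if (from_vm86)
			bytes += 4 * 4;		/* VM86 frames also carry es, ds, fs, gs */

		void *new_sp = (char *)task_stack_top - bytes;	/* allocate frame on task-stack */
		memcpy(new_sp, entry_sp, bytes);		/* copy over the stack-frame */
		return new_sp;					/* becomes the new %esp */
	}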
443 * kernel-mode and %esp points to the entry-stack. When this
444 * happens we need to switch to the task-stack to run C code,
445 * but switch back to the entry-stack again when we approach
446 * iret and return to the interrupted code-path. This usually
447 * happens when we hit an exception while restoring user-space
448 * segment registers on the way back to user-space or when the
451 * When we switch to the task-stack here, we can't trust the
452 * contents of the entry-stack anymore, as the exception handler
454 * copy the complete entry-stack to the task-stack and set a
455 * marker in the iret-frame (bit 31 of the CS dword) to detect
458 * On the iret path we copy everything back and switch to the
459 * entry-stack, so that the interrupted kernel code-path
464 * %esi: Entry-Stack pointer (same as %esp)
472 /* %ecx to the top of entry-stack */
484 * so that we can switch back to it before iret.
493 * the stack-frame on task-stack and copy everything over
501 * Switch back from the kernel stack to the entry stack.
504 * first calculate the size of the stack-frame to copy, depending on
509 * task-stack once we switched to the entry-stack. When an NMI happens
510 * while on the entry-stack, the NMI handler will switch back to the top
511 * of the task stack, overwriting our stack-frame we are about to copy.
512 * Therefore we switch the stack only after everything is copied over.
523 /* Additional 4 registers to copy when returning to VM86 mode */
524 addl $(4 * 4), %ecx
537 /* Copy over the stack-frame */
543 * Switch to entry-stack - needs to happen after everything is
544 * copied because the NMI handler will overwrite the task-stack
545 * when on entry-stack
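The return direction is the mirror image, with the ordering constraint spelled out in the comment above: the destination near the end of the entry stack is computed and the frame copied while still running on the task stack, and %esp is switched only afterwards, since an NMI taken on the entry stack would overwrite the task-stack frame being copied. A rough model (hypothetical names):

	#include <string.h>

	static void *switch_back_to_entry_stack(void *task_sp, void *entry_stack_end,
						unsigned long bytes)
	{
		void *future_sp = (char *)entry_stack_end - bytes;	/* allocate on entry-stack */

		memcpy(future_sp, task_sp, bytes);	/* copy while still on the task-stack */
		return future_sp;			/* only now would %esp be switched */
	}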
553 * This macro handles the case when we return to kernel-mode on the iret
554 * path and have to switch back to the entry stack and/or user-cr3
562 * Test if we entered the kernel with the entry-stack. Most
564 * return-to-kernel path.
569 /* Unlikely slow-path */
571 /* Clear marker from stack-frame */
574 /* Copy the remaining task-stack contents to entry-stack */
578 /* Bytes on the task-stack to ecx */
582 /* Allocate stack-frame on entry-stack */
586 * Save future stack-pointer, we must not switch until the
588 * contents of the task-stack we are about to copy.
597 /* Safe to switch to entry-stack now */
601 * We came from entry-stack and need to check if we also need to
602 * switch back to user cr3.
607 /* Clear marker from stack-frame */
616 * idtentry - Macro to generate entry stubs for simple IDT entries
631 /* Push the C-function address into the GS slot */
646 movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */
674 * Save callee-saved registers
688 /* switch stack */
708 /* restore callee-saved registers */
737 addl $4, %esp
747 * to being single-stepped if a user program sets TF and executes SYSENTER.
751 * will ignore all of the single-step traps generated in this range.
755 * 32-bit SYSENTER entry.
757 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
759 * entry on 32-bit systems.
764 * never happened in any of Google's Bionic versions -- it only happened
765 * in a narrow range of Intel-provided versions.
788 * On entry-stack with all userspace-regs live - save and
789 * restore eflags and %eax to use it as scratch-reg for the cr3
790 * switch.
799 /* Stack empty again, switch to task stack */
803 pushl $__USER_DS /* pt_regs->ss */
804 pushl $0 /* pt_regs->sp (placeholder) */
805 pushfl /* pt_regs->flags (except IF = 0) */
806 pushl $__USER_CS /* pt_regs->cs */
807 pushl $0 /* pt_regs->ip = 0 (placeholder) */
808 pushl %eax /* pt_regs->orig_ax */
809 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
818 * If TF is set, we will single-step all the way to here -- do_debug
820 * single-stepping in general. This allows us to avoid having
822 * forces us to single-step through the SYSENTER entry code.)
825 * out-of-line as an optimization: NT is unlikely to be set in the
828 * not-taken and therefore its instructions won't be fetched.
844 * Setup entry stack - we keep the pointer in %eax and do the
845 * switch after almost all user-state is restored.
850 subl $(2*4), %eax
856 movl %esi, 4(%eax)
859 movl PT_EIP(%esp), %edx /* pt_regs->ip */
860 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
863 popl %ebx /* pt_regs->bx */
864 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
865 popl %esi /* pt_regs->si */
866 popl %edi /* pt_regs->di */
867 popl %ebp /* pt_regs->bp */
869 /* Switch to entry stack */
872 /* Now ready to switch the cr3 */
879 * STI gives a one-instruction window in which we won't be interrupted,
906 * 32-bit legacy system call entry.
908 * 32-bit x86 Linux system calls traditionally used the INT $0x80
911 * This entry point can be used by any 32-bit program to perform system calls.
915 * Restarted 32-bit system calls also fall back to INT $0x80
917 * call. (64-bit programs can use INT $0x80 as well, but they can
918 * only run on 64-bit kernels and therefore land in
935 pushl %eax /* pt_regs->orig_ax */
937 SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
948 /* Switch back to user CR3 */
954 RESTORE_REGS pop=4 # skip orig_eax/error_code
960 * scheduler to user-space.
970 * The stack-frame here is the one that iret faulted on, so it's a
971 * return-to-user frame. We are on kernel-cr3 because we come here from
972 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
987 * Switch back for ESPFIX stack to the normal zerobased stack
1000 subl $2*4, %esp
1005 * zero. %cs is the only known-linear segment we have right now.
1007 mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
1010 addl $2*4, %esp
1015 lss (%esp), %esp /* switch to the normal stack segment */
1026 /* switch to normal stack */
1041 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1063 RESTORE_REGS 4
1079 * - CR0.TS is set. "TS" literally means "task switched".
1080 * - EFLAGS.NT is set because we're a "nested task".
1081 * - The doublefault TSS has back_link set and has been marked busy.
1082 * - TR points to the doublefault TSS and the normal TSS is busy.
1083 * - CR3 is the normal kernel PGD. This would be delightful, except
1095 * We will manually undo the task switch instead of doing a
1096 * task-switching IRET.
1133 pushl %eax # pt_regs->orig_ax
1142 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1152 * We're on the SYSENTER stack. Switch off. No one (not even debug)
1167 RESTORE_ALL_NMI cr3_reg=%edi pop=4
1178 addl $4, (%esp)
1181 pushl 4*4(%esp) # flags
1182 pushl 4*4(%esp) # cs
1183 pushl 4*4(%esp) # ip
1203 * 3 - original frame (exception)
1204 * 2 - ESPFIX block (above)
1205 * 6 - gap (FIXUP_FRAME)
1206 * 5 - long frame (FIXUP_FRAME)
1207 * 1 - orig_ax
1209 lss (1+5+6)*4(%esp), %esp # back to espfix stack
1221 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
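The closing leal recomputes where pt_regs sit on the task stack: just below the top of the stack, underneath TOP_OF_KERNEL_STACK_PADDING. In C this is essentially the following (compare the kernel's task_pt_regs() helper; the function here is an illustrative sketch):

	struct pt_regs;

	static struct pt_regs *regs_on_task_stack(unsigned long stack_top,
						  unsigned long padding,
						  unsigned long ptregs_size)
	{
		/* leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp */
		return (struct pt_regs *)(stack_top - padding - ptregs_size);
	}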