entry_32.S: diff between commits 9e8238020c5beba64e7ffafbb7ea0fb02fe68270 (old) and a13f2ef168cb2a033a284eb841bcc481ffbc90cf (new)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.

--- 30 unchanged lines hidden ---

#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
/* a13f2ef1 (new) adds: */
#include <asm/trapnr.h>

#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

/* 9e823802 (old) only: */
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPTION
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)

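/*
 * Editorial sketch (not from either commit): with 32-bit PTI the kernel and
 * user page-table copies sit one page apart, so the SWITCH_TO_USER_CR3 /
 * SWITCH_TO_KERNEL_CR3 macros further down only have to toggle bit
 * PAGE_SHIFT (bit 12) of CR3. A minimal C rendering, assuming PAGE_SHIFT
 * is 12 as on x86-32:
 */
#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PTI_SWITCH_MASK	(1UL << SKETCH_PAGE_SHIFT)

static unsigned long cr3_switch_to_user(unsigned long kernel_cr3)
{
	return kernel_cr3 | SKETCH_PTI_SWITCH_MASK;	/* user copy, +4096 */
}

static unsigned long cr3_switch_to_kernel(unsigned long user_cr3)
{
	return user_cr3 & ~SKETCH_PTI_SWITCH_MASK;
}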
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and the kernel only uses it for the stack
 * canary, which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.

--- 382 unchanged lines hidden ---

 * can happen everywhere. If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there. So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

.macro SWITCH_TO_KERNEL_STACK

	/* 9e823802 (old) only: */
	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

--- 132 unchanged lines hidden ---

 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we switched to the entry-stack. When an NMI happens
 * while on the entry-stack, the NMI handler will switch back to the top
 * of the task stack, overwriting our stack-frame we are about to copy.
 * Therefore we switch the stack only after everything is copied over.
 */
.macro SWITCH_TO_ENTRY_STACK

	/* 9e823802 (old) only: */
	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */

--- 82 unchanged lines hidden ---

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm
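
/*
 * Editorial sketch (not from either commit) of the copy size the macro
 * above selects: PTREGS_SIZE bytes of saved register state, plus four
 * extra 4-byte slots (the "additional 4 registers") when returning to
 * VM86 mode. The PTREGS_SIZE value below is an assumption for
 * illustration only.
 */
#include <stdbool.h>
#include <stddef.h>

#define SKETCH_PTREGS_SIZE	(17 * 4)	/* assumed size of struct pt_regs */

static size_t frame_copy_bytes(bool returning_to_vm86)
{
	return SKETCH_PTREGS_SIZE + (returning_to_vm86 ? 4 * 4 : 0);
}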

/* a13f2ef1 (new) only: */
/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	ASM_CLAC
	cld

	.if \has_error_code == 0
	pushl	$0				/* Clear the error code */
	.endif

	/* Push the C-function address into the GS slot */
	pushl	$\cfunc
	/* Invoke the common exception entry */
	jmp	handle_exception
SYM_CODE_END(\asmsym)
.endm

.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(asm_\cfunc)
	ASM_CLAC
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	movl	%esp, %eax
	movl	PT_ORIG_EAX(%esp), %edx		/* get the vector from stack */
	movl	$-1, PT_ORIG_EAX(%esp)		/* no syscall to restart */
	call	\cfunc
	jmp	handle_exception_return
SYM_CODE_END(asm_\cfunc)
.endm

.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

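/*
 * Editorial sketch (not from either commit): the stub pushes the C
 * function's address and jumps to handle_exception below, which loads the
 * pt_regs pointer into %eax and the error code into %edx before
 * CALL_NOSPEC. Since the 32-bit kernel is built with -mregparm=3
 * (arguments in %eax, %edx, %ecx), that dispatch is equivalent to
 * cfunc(regs, error_code). The handler shape is roughly as follows
 * (names and attributes assumed; the authoritative definitions are the
 * DEFINE_IDTENTRY* macros in <asm/idtentry.h>):
 */
struct pt_regs;

/* has_error_code=0: the stub pushed a 0 placeholder */
void exc_example_no_error(struct pt_regs *regs);

/* has_error_code=1: hardware pushed the error code */
void exc_example_with_error(struct pt_regs *regs, unsigned long error_code);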
/* a13f2ef1 (new) only: */
/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

/*
 * %eax: prev task
 * %edx: next task
 */
/* a13f2ef1 (new) only: */
.pushsection .text, "ax"
SYM_CODE_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi

--- 30 unchanged lines hidden ---

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
SYM_CODE_END(__switch_to_asm)
/* a13f2ef1 (new) only: */
.popsection
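
/*
 * Editorial sketch (not from either commit) of the frame the pushes above
 * build, lowest address = most recent push. Field names are assumed; the
 * authoritative layout is struct inactive_task_frame in
 * arch/x86/include/asm/switch_to.h, and any further saves in the elided
 * lines (e.g. %esi) would sit below the fields shown here.
 */
struct inactive_task_frame_sketch {
	unsigned long si;	/* presumably pushed in the elided lines */
	unsigned long di;
	unsigned long bx;
	unsigned long bp;	/* first push above */
	unsigned long ret_addr;	/* pushed by the call into __switch_to_asm */
};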

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
/* a13f2ef1 (new) only: */
.pushsection .text, "ax"
SYM_FUNC_START(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
SYM_FUNC_END(schedule_tail_wrapper)
/* a13f2ef1 (new) only: */
.popsection

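/*
 * Editorial note (not from either commit): on x86-32, asmlinkage expands
 * to regparm(0), i.e. "all arguments on the stack", which is why the
 * wrapper pushes %eax (prev) before the call instead of relying on the
 * kernel's usual regparm(3) register ABI. A sketch of the declaration
 * involved, assuming the x86-32 spelling of asmlinkage:
 */
#define sketch_asmlinkage __attribute__((regparm(0)))

struct task_struct;
sketch_asmlinkage void schedule_tail(struct task_struct *prev);
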
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
/* a13f2ef1 (new) only: */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	/* 9e823802 (old): */
	STACKLEAK_ERASE
	jmp	restore_all
	/* a13f2ef1 (new): */
	jmp	.Lsyscall_32_done

	/* kernel thread */
1:	movl	%edi, %eax
	/* 9e823802 (old): */
	CALL_NOSPEC %ebx
	/* a13f2ef1 (new): */
	CALL_NOSPEC ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
/* a13f2ef1 (new) only: */
.popsection

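/*
 * Editorial sketch (not from either commit) of the control flow above in
 * C-like form. fn/fn_arg correspond to %ebx/%edi; the PT_EAX store is
 * what makes a kernel thread's successful execve() return 0 in the child.
 * The pt_regs layout here is a minimal stand-in for illustration.
 */
struct pt_regs_sketch {
	unsigned long ax;	/* PT_EAX slot */
};

extern void syscall_return_slowpath(struct pt_regs_sketch *regs);

static void ret_from_fork_sketch(struct pt_regs_sketch *regs,
				 int (*fn)(void *), void *fn_arg)
{
	/* schedule_tail(prev) already ran via schedule_tail_wrapper */
	if (fn) {			/* kernel thread (%ebx != 0) */
		fn(fn_arg);
		/* reached only after a successful do_execve() */
		regs->ax = 0;		/* movl $0, PT_EAX(%esp) */
	}
	syscall_return_slowpath(regs);	/* then exit to user mode */
}
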
/* 9e823802 (old) only: */
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
SYM_CODE_START_LOCAL(ret_from_exception)
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	restore_all_kernel		# not returning to v8086 or userspace

	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
SYM_CODE_END(ret_from_exception)

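/*
 * Editorial sketch (not from either commit) of the "mix EFLAGS and CS"
 * trick above: the low byte of the mix is the selector's RPL, and bit 17
 * is the VM86 flag, so a single unsigned compare against USER_RPL covers
 * both "returning to userspace" and "returning to v8086". Constants are
 * the architectural ones (X86_EFLAGS_VM is bit 17, USER_RPL == 3).
 */
#include <stdbool.h>

#define SKETCH_X86_EFLAGS_VM	(1UL << 17)
#define SKETCH_SEGMENT_RPL_MASK	0x3UL
#define SKETCH_USER_RPL		3UL

static bool returning_to_user_or_vm86(unsigned long eflags, unsigned long cs)
{
	unsigned long v = (eflags & ~0xffUL) | (cs & 0xffUL);	/* movb %al */

	v &= SKETCH_X86_EFLAGS_VM | SKETCH_SEGMENT_RPL_MASK;
	return v >= SKETCH_USER_RPL;	/* jb restore_all_kernel otherwise */
}
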
SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

/* 9e823802 (old) only: */
#ifdef CONFIG_XEN_PV
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
SYM_CODE_START(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
SYM_CODE_END(xen_sysenter_target)
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped

--- 33 unchanged lines hidden ---

	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	/* 9e823802 (old): */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	/* a13f2ef1 (new): */
	pushl	$0			/* pt_regs->sp (placeholder) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether

--- 12 unchanged lines hidden ---

	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

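/*
 * Editorial sketch (not from either commit) of the flags test above,
 * using the architectural bit positions: TF is bit 8, NT bit 14, AC
 * bit 18.
 */
#include <stdbool.h>

#define SKETCH_X86_EFLAGS_TF	(1UL << 8)
#define SKETCH_X86_EFLAGS_NT	(1UL << 14)
#define SKETCH_X86_EFLAGS_AC	(1UL << 18)

static bool sysenter_needs_flags_fixup(unsigned long eflags)
{
	/* SYSENTER does not clear these for us; see the comment above */
	return eflags & (SKETCH_X86_EFLAGS_NT | SKETCH_X86_EFLAGS_AC |
			 SKETCH_X86_EFLAGS_TF);
}
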
	/* 9e823802 (old): */
	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* a13f2ef1 (new): */
	movl	%esp, %eax
	call	do_SYSENTER_32
	testl	%eax, %eax
	jz	.Lsyscall_32_done

	STACKLEAK_ERASE

/* Opportunistic SYSEXIT */
	/* 9e823802 (old) only: */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */

	/*
	 * Set up the entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax

--- 83 unchanged lines hidden ---

 * ebp  arg6
 */
SYM_FUNC_START(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

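/*
 * Editorial example (userspace, not kernel code, and not from either
 * commit): invoking this entry point directly from a 32-bit process.
 * %eax carries the syscall number in and the return value out; 20 is
 * __NR_getpid on i386.
 */
static long int80_getpid(void)
{
	long ret;

	asm volatile("int $0x80" : "=a" (ret) : "a" (20L) : "memory");
	return ret;
}
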
	/* 9e823802 (old) only: */
	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

	STACKLEAK_ERASE

/* 9e823802 (old): */
restore_all:
	TRACE_IRQS_ON
/* a13f2ef1 (new): */
restore_all_switch_stack:
	SWITCH_TO_ENTRY_STACK
	CHECK_AND_APPLY_ESPFIX

	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

/* 9e823802 (old) only: */
restore_all_kernel:
#ifdef CONFIG_PREEMPTION
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	.Lno_preempt
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	.Lno_preempt
	call	preempt_schedule_irq
.Lno_preempt:
#endif
	TRACE_IRQS_IRET
	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

.section .fixup, "ax"
/*
 * 9e823802 (old) names this iret_exc and uses do_iret_error plus
 * common_exception; a13f2ef1 (new) renames it asm_iret_error and uses
 * iret_error plus handle_exception:
 */
SYM_CODE_START(asm_iret_error)
	pushl	$0				# no error code
	pushl	$iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The stack-frame here is the one that iret faulted on, so it's a
	 * return-to-user frame. We are on kernel-cr3 because we come here from
	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
	 * as the checker expects it.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	handle_exception
SYM_CODE_END(asm_iret_error)
.previous
	_ASM_EXTABLE(.Lirq_return, asm_iret_error)
SYM_FUNC_END(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the

--- 33 unchanged lines hidden ---

	cmpw	$__ESPFIX_SS, %ax
	jne	.Lno_fixup_\@
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
.Lno_fixup_\@:
#endif
.endm

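/*
 * Editorial sketch (not from either commit) of why the macro above has to
 * read the high word of the base separately: a 32-bit segment base is
 * scattered across a GDT descriptor in the standard x86 layout (struct
 * name assumed for illustration).
 */
struct gdt_desc_sketch {
	unsigned short	limit0;
	unsigned short	base0;		/* base bits  0..15 */
	unsigned char	base1;		/* base bits 16..23 */
	unsigned char	type;
	unsigned char	limit1_flags;
	unsigned char	base2;		/* base bits 24..31 */
};

static unsigned int gdt_base(const struct gdt_desc_sketch *d)
{
	return d->base0 | ((unsigned int)d->base1 << 16) |
	       ((unsigned int)d->base2 << 24);
}
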
/*
 * 9e823802 (old) only: everything from here through the exception stubs
 * below was removed in a13f2ef1 (new), which generates its stubs via the
 * idtentry macros and <asm/idtentry.h> include above.
 *
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align	8
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
SYM_CODE_END(irq_entries_start)
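
/*
 * Editorial sketch (not from either commit) of the vector encoding above:
 * ~vector + 0x80 equals 0x7f - vector, which for vectors 32..255 always
 * lies in [-128, 95] and therefore fits a sign-extended byte (a 2-byte
 * pushl). The handler's "addl $-0x80, (%esp)" turns it back into ~vector,
 * a value in [-256, -1], from which the vector is ~value & 0xff.
 */
#include <assert.h>

static int encode_vector(int vector)	/* what the stub pushes */
{
	int v = (~vector + 0x80) & 0xff;

	return (signed char)v;		/* sign-extended imm8 */
}

static int decode_vector(int pushed)	/* what the handler computes */
{
	int adjusted = pushed - 0x80;	/* addl $-0x80, (%esp) */

	assert(adjusted >= -256 && adjusted <= -1);
	return ~adjusted & 0xff;
}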

#ifdef CONFIG_X86_LOCAL_APIC
	.align	8
SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_spurious
	.align	8
    .endr
SYM_CODE_END(spurious_entries_start)

SYM_CODE_START_LOCAL(common_spurious)
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	smp_spurious_interrupt
	jmp	ret_from_intr
SYM_CODE_END(common_spurious)
#endif

/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(common_interrupt)
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
SYM_CODE_END(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
SYM_FUNC_START(name)			\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL switch_stacks=1;	\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
SYM_FUNC_END(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

SYM_CODE_START(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
SYM_CODE_END(coprocessor_error)

SYM_CODE_START(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection",	\
		    "pushl $do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
SYM_CODE_END(simd_coprocessor_error)

SYM_CODE_START(device_not_available)
	ASM_CLAC
	pushl	$0
	pushl	$do_device_not_available
	jmp	common_exception
SYM_CODE_END(device_not_available)

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
SYM_CODE_END(native_iret)
#endif

SYM_CODE_START(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
SYM_CODE_END(overflow)

SYM_CODE_START(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
SYM_CODE_END(bounds)

SYM_CODE_START(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
SYM_CODE_END(invalid_op)

SYM_CODE_START(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
SYM_CODE_END(coprocessor_segment_overrun)

SYM_CODE_START(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
SYM_CODE_END(invalid_TSS)

SYM_CODE_START(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
SYM_CODE_END(segment_not_present)

SYM_CODE_START(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
SYM_CODE_END(stack_segment)

SYM_CODE_START(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
SYM_CODE_END(alignment_check)

SYM_CODE_START(divide_error)
	ASM_CLAC
	pushl	$0			# no error code
	pushl	$do_divide_error
	jmp	common_exception
SYM_CODE_END(divide_error)

#ifdef CONFIG_X86_MCE
SYM_CODE_START(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	$do_mce
	jmp	common_exception
SYM_CODE_END(machine_check)
#endif

SYM_CODE_START(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
SYM_CODE_END(spurious_interrupt_bug)

/* 9e823802 (old) only: */
#ifdef CONFIG_XEN_PV
SYM_FUNC_START(xen_hypervisor_callback)
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	cmpl	$xen_iret_start_crit, (%esp)
	jb	1f
	cmpl	$xen_iret_end_crit, (%esp)
	jae	1f
	call	xen_iret_crit_fixup
1:
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPTION
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
SYM_FUNC_END(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
SYM_FUNC_START(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/*
	 * EAX == 0 => Category 1 (Bad segment)
	 * EAX != 0 => Category 2 (Bad IRET)
	 */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

/* 9e823802 (old) only: */
#ifdef CONFIG_XEN_PVHVM
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)
#endif

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
		 hv_stimer0_vector_handler)

#endif /* CONFIG_HYPERV */

SYM_CODE_START(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	jmp	common_exception_read_cr2
SYM_CODE_END(page_fault)

/* 9e823802 (old) only: */
SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1

	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	GET_CR2_INTO(%ecx)			# might clobber %eax

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
SYM_CODE_END(common_exception_read_cr2)

SYM_CODE_START_LOCAL_NOALIGN(common_exception)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
SYM_CODE_END(common_exception)

SYM_CODE_START(debug)
	/*
	 * Entry from sysenter is now handled in common_exception
	 */
	ASM_CLAC
	pushl	$0
	pushl	$do_debug
	jmp	common_exception
SYM_CODE_END(debug)

/* a13f2ef1 (new) only: */
SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC edi

handle_exception_return:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax			# returning to v8086 or userspace ?
	jnb	ret_to_user

	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

ret_to_user:
	movl	%esp, %eax
	jmp	restore_all_switch_stack
SYM_CODE_END(handle_exception)

/* 9e823802 (old): */
#ifdef CONFIG_DOUBLEFAULT
SYM_CODE_START(double_fault)
/* a13f2ef1 (new): */
SYM_CODE_START(asm_exc_double_fault)
1:
	/*
	 * This is a task gate handler, not an interrupt gate handler.
	 * The error code is on the stack, but the stack is otherwise
	 * empty. Interrupts are off. Our state is sane with the following
	 * exceptions:
	 *
	 * - CR0.TS is set. "TS" literally means "task switched".

--- 21 unchanged lines hidden ---

	popfl					/* clear EFLAGS.NT */

	call	doublefault_shim

	/* We don't support returning, so we have no IRET here. */
1:
	hlt
	jmp	1b
/* 9e823802 (old): */
SYM_CODE_END(double_fault)
#endif
/* a13f2ef1 (new): */
SYM_CODE_END(asm_exc_double_fault)

/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
/* 9e823802 (old): */
SYM_CODE_START(nmi)
/* a13f2ef1 (new): */
SYM_CODE_START(asm_exc_nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * ESPFIX_SS is only ever set on the return to user path
	 * after we've switched to the entry stack.
	 */
	pushl	%eax

--- 12 unchanged lines hidden ---

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx			/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	/* 9e823802 (old): */
	call	do_nmi
	/* a13f2ef1 (new): */
	call	exc_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	/* 9e823802 (old): */
	call	do_nmi
	/* a13f2ef1 (new): */
	call	exc_nmi
	movl	%ebx, %esp

.Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
	jnz	.Lnmi_from_espfix
#endif

--- 37 unchanged lines hidden ---

	 * 2 - ESPFIX block (above)
	 * 6 - gap (FIXUP_FRAME)
	 * 5 - long frame (FIXUP_FRAME)
	 * 1 - orig_ax
	 */
	lss	(1+5+6)*4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
/* 9e823802 (old): */
SYM_CODE_END(nmi)
/* a13f2ef1 (new): */
SYM_CODE_END(asm_exc_nmi)
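
/*
 * Editorial sketch (not from either commit) of the "are we on the
 * SYSENTER/entry stack?" test used above: compute (end of entry_stack) -
 * %esp and compare it, unsigned, against the stack's size; a result below
 * the size means %esp lies within the stack. Names are assumptions for
 * illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static bool on_entry_stack(uintptr_t esp, uintptr_t stack_base,
			   uintptr_t stack_size)
{
	uintptr_t end = stack_base + stack_size;

	return (end - esp) < stack_size;	/* jb .Lnmi_from_sysenter_stack */
}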

/* 9e823802 (old) only: */
SYM_CODE_START(int3)
	ASM_CLAC
	pushl	$0
	pushl	$do_int3
	jmp	common_exception
SYM_CODE_END(int3)

SYM_CODE_START(general_protection)
	ASM_CLAC
	pushl	$do_general_protection
	jmp	common_exception
SYM_CODE_END(general_protection)

#ifdef CONFIG_KVM_GUEST
SYM_CODE_START(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception_read_cr2
SYM_CODE_END(async_page_fault)
#endif

/* a13f2ef1 (new) only: */
.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
SYM_CODE_END(rewind_stack_do_exit)
/* a13f2ef1 (new) only: */
.popsection