/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPTION
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
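 *
 * With CONFIG_X86_32_LAZY_GS (i.e. no stack protector) %gs is not
 * touched on kernel entry at all; PUSH_GS/POP_GS below only keep the
 * pt_regs slot in place so the frame layout stays the same.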
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	/* coming from usermode? */
	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	/* On user-cr3? */
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	/* From userspace with kernel cr3 - BUG */
	ud2
.Lend_\@:
#endif
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm

#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
#define CS_FROM_KERNEL		(1 << 29)
#define CS_FROM_ESPFIX		(1 << 28)

.macro FIXUP_FRAME
	/*
	 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
	 * Clear them in case hardware didn't do this for us.
	 */
	andl	$0x0000ffff, 4*4(%esp)

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, 5*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@
#endif
	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@

	orl	$CS_FROM_KERNEL, 4*4(%esp)

	/*
	 * When we're here from kernel mode, the (exception) stack looks like:
	 *
	 *  6*4(%esp) - <previous context>
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 *
	 * Let's build a 5 entry IRET frame after that, such that struct pt_regs
	 * is complete and in particular regs->sp is correct. This gives us
	 * the original 6 entries as gap:
	 *
	 * 14*4(%esp) - <previous context>
	 * 13*4(%esp) - gap / flags
	 * 12*4(%esp) - gap / cs
	 * 11*4(%esp) - gap / ip
	 * 10*4(%esp) - gap / orig_eax
	 *  9*4(%esp) - gap / gs / function
	 *  8*4(%esp) - gap / fs
	 *  7*4(%esp) - ss
	 *  6*4(%esp) - sp
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 */

	pushl	%ss		# ss
	pushl	%esp		# sp (points at ss)
	addl	$7*4, (%esp)	# point sp back at the previous context
	pushl	7*4(%esp)	# flags
	pushl	7*4(%esp)	# cs
	pushl	7*4(%esp)	# ip
	pushl	7*4(%esp)	# orig_eax
	pushl	7*4(%esp)	# gs / function
	pushl	7*4(%esp)	# fs
.Lfrom_usermode_no_fixup_\@:
.endm

.macro IRET_FRAME
	/*
	 * We're called with %ds, %es, %fs, and %gs from the interrupted
	 * frame, so we shouldn't use them.  Also, we may be in ESPFIX
	 * mode and therefore have a nonzero SS base and an offset ESP,
	 * so any attempt to access the stack needs to use SS.  (except for
	 * accesses through %esp, which automatically use SS.)
	 */
	testl	$CS_FROM_KERNEL, 1*4(%esp)
	jz	.Lfinished_frame_\@

	/*
	 * Reconstruct the 3 entry IRET frame right after the (modified)
	 * regs->sp without lowering %esp in between, such that an NMI in the
	 * middle doesn't scribble our stack.
	 */
	pushl	%eax
	pushl	%ecx
	movl	5*4(%esp), %eax		# (modified) regs->sp

	movl	4*4(%esp), %ecx		# flags
	movl	%ecx, %ss:-1*4(%eax)

	movl	3*4(%esp), %ecx		# cs
	andl	$0x0000ffff, %ecx
	movl	%ecx, %ss:-2*4(%eax)

	movl	2*4(%esp), %ecx		# ip
	movl	%ecx, %ss:-3*4(%eax)

	movl	1*4(%esp), %ecx		# eax
	movl	%ecx, %ss:-4*4(%eax)

	popl	%ecx
	lea	-4*4(%eax), %esp
	popl	%eax
.Lfinished_frame_\@:
.endm

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
	cld
.if \skip_gs == 0
	PUSH_GS
.endif
	pushl	%fs

	pushl	%eax
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
.if \unwind_espfix > 0
	UNWIND_ESPFIX_STACK
.endif
	popl	%eax

	FIXUP_FRAME
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
.if \skip_gs == 0
	SET_KERNEL_GS %edx
.endif
	/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif
.endm

.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
	SAVE_ALL unwind_espfix=\unwind_espfix

	BUG_IF_WRONG_CR3

	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We can enter with either user or kernel cr3, the code will
	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
	 * if necessary.
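	 *
	 * The saved value is consumed later by RESTORE_ALL_NMI: if the
	 * user-cr3 bit is still set in \cr3_reg, the exit path writes it
	 * back to %cr3.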
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
	IRET_FRAME
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We enter with kernel cr3 and switch the cr3 to the value
	 * stored in \cr3_reg, which is either a user or a kernel cr3.
	 */
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	/* User cr3 in \cr3_reg - write it to hardware cr3 */
	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm

.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET

	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that accounts for the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
.endm

/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen anywhere.  If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there.  So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack-frame
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@

.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
	 *
	 * %esi: Entry-Stack pointer (same as %esp)
	 * %edi: Top of the task stack
	 * %eax: CR3 on kernel entry
	 */

	/* Calculate number of bytes on the entry stack in %ecx */
	movl	%esi, %ecx

	/* %ecx to the top of entry-stack */
	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	/* Number of bytes on the entry stack to %ecx */
	sub	%esi, %ecx

	/* Mark stackframe as coming from entry stack */
	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)

	/*
	 * Test the cr3 used to enter the kernel and add a marker
	 * so that we can switch back to it before iret.
	 */
	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	/*
	 * %esi and %edi are unchanged, %ecx contains the number of
	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
	 * the stack-frame on task-stack and copy everything over
	 */
	jmp	.Lcopy_pt_regs_\@

.Lend_\@:
.endm

/*
 * Switch back from the kernel stack to the entry stack.
 *
 * The %esp register must point to pt_regs on the task stack. It will
 * first calculate the size of the stack-frame to copy, depending on
 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 * to copy the contents of the stack over to the entry stack.
 *
 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we switched to the entry-stack. When an NMI happens
 * while on the entry-stack, the NMI handler will switch back to the top
 * of the task stack, overwriting the stack-frame we are about to copy.
 * Therefore we switch the stack only after everything is copied over.
 */
.macro SWITCH_TO_ENTRY_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */
	addl	$(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	/* Initialize source and destination for movsl */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	/* Save future stack pointer in %ebx */
	movl	%edi, %ebx

	/* Copy over the stack-frame */
	shrl	$2, %ecx
	cld
	rep movsl

	/*
	 * Switch to entry-stack - needs to happen after everything is
	 * copied because the NMI handler will overwrite the task-stack
	 * when on entry-stack
	 */
	movl	%ebx, %esp

.Lend_\@:
.endm

/*
 * This macro handles the case when we return to kernel-mode on the iret
 * path and have to switch back to the entry stack and/or user-cr3
 *
 * See the comments below the .Lentry_from_kernel_\@ label in the
 * SWITCH_TO_KERNEL_STACK macro for more details.
 */
.macro PARANOID_EXIT_TO_KERNEL_MODE

	/*
	 * Test if we entered the kernel with the entry-stack. Most
	 * likely we did not, because this code only runs on the
	 * return-to-kernel path.
	 */
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer, we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm
/*
 * %eax: prev task
 * %edx: next task
 */
SYM_CODE_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	/*
	 * Flags are saved to prevent AC leakage. This could go
	 * away if objtool had 32-bit support to verify
	 * the STAC/CLAC correctness.
	 */
	pushfl

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* Restore flags of the incoming task to restore AC state. */
	popfl
	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
SYM_CODE_END(__switch_to_asm)

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
SYM_FUNC_START(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
SYM_FUNC_END(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
SYM_CODE_START(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	STACKLEAK_ERASE
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC %ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
SYM_CODE_END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
SYM_CODE_START_LOCAL(ret_from_exception)
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	restore_all_kernel		# not returning to v8086 or userspace

	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
SYM_CODE_END(ret_from_exception)

SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN_PV
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
SYM_CODE_START(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
SYM_CODE_END(xen_sysenter_target)
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
SYM_FUNC_START(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	STACKLEAK_ERASE

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
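	 *
	 * The 2*4 bytes allocated below hold the user eflags and eax;
	 * they are the last two pops once %esp points at the entry
	 * stack again.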
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */

	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
SYM_FUNC_END(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method.  Restarted 32-bit system calls also fall back
 * to INT $0x80 regardless of what instruction was originally used to
 * do the system call.  (64-bit programs can use INT $0x80 as well,
 * but they can only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
SYM_FUNC_START(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

	STACKLEAK_ERASE

restore_all:
	TRACE_IRQS_IRET
	SWITCH_TO_ENTRY_STACK
	CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

restore_all_kernel:
#ifdef CONFIG_PREEMPTION
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	.Lno_preempt
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	.Lno_preempt
	call	preempt_schedule_irq
.Lno_preempt:
#endif
	TRACE_IRQS_IRET
	PARANOID_EXIT_TO_KERNEL_MODE
	BUG_IF_WRONG_CR3
	RESTORE_REGS 4
	jmp	.Lirq_return

.section .fixup, "ax"
SYM_CODE_START(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * The stack-frame here is the one that iret faulted on, so it's a
	 * return-to-user frame. We are on kernel-cr3 because we come here from
	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
	 * as the checker expects it.
	 */
	pushl	%eax
	SWITCH_TO_USER_CR3 scratch_reg=%eax
	popl	%eax
#endif

	jmp	common_exception
SYM_CODE_END(iret_exc)
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)
SYM_FUNC_END(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 *
 * We might be on user CR3 here, so percpu data is not mapped and we can't
 * access the GDT through the percpu segment.  Instead, use SGDT to find
 * the cpu_entry_area alias of the GDT.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	pushl	%ecx
	subl	$2*4, %esp
	sgdt	(%esp)
	movl	2(%esp), %ecx			/* GDT address */
	/*
	 * Careful: ECX is a linear pointer, so we need to force base
	 * zero.  %cs is the only known-linear segment we have right now.
	 */
	mov	%cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al	/* bits 16..23 */
	mov	%cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah	/* bits 24..31 */
	shl	$16, %eax
	addl	$2*4, %esp
	popl	%ecx
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
	/* It's safe to clobber %eax, all other regs need to be preserved */
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	.Lno_fixup_\@
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
.Lno_fixup_\@:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
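 *
 * Each stub pushes $(~vector + 0x80) -- the 0x80 bias keeps the immediate
 * in signed-byte range so that push plus jmp fit in the 8-byte slot -- and
 * jumps to the common handler, which subtracts the bias again (leaving
 * ~vector in orig_eax) before calling the C code.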
 */
	.align 8
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
SYM_CODE_END(irq_entries_start)

#ifdef CONFIG_X86_LOCAL_APIC
	.align 8
SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_spurious
	.align	8
    .endr
SYM_CODE_END(spurious_entries_start)

SYM_CODE_START_LOCAL(common_spurious)
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	smp_spurious_interrupt
	jmp	ret_from_intr
SYM_CODE_END(common_spurious)
#endif

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(common_interrupt)
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
SYM_CODE_END(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)			\
SYM_FUNC_START(name)					\
	ASM_CLAC;					\
	pushl	$~(nr);					\
	SAVE_ALL switch_stacks=1;			\
	ENCODE_FRAME_POINTER;				\
	TRACE_IRQS_OFF					\
	movl	%esp, %eax;				\
	call	fn;					\
	jmp	ret_from_intr;				\
SYM_FUNC_END(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

SYM_CODE_START(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
SYM_CODE_END(coprocessor_error)

SYM_CODE_START(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
SYM_CODE_END(simd_coprocessor_error)

SYM_CODE_START(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
SYM_CODE_END(device_not_available)

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
SYM_CODE_END(native_iret)
#endif

SYM_CODE_START(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
SYM_CODE_END(overflow)

SYM_CODE_START(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
SYM_CODE_END(bounds)

SYM_CODE_START(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
SYM_CODE_END(invalid_op)

SYM_CODE_START(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
SYM_CODE_END(coprocessor_segment_overrun)

SYM_CODE_START(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
SYM_CODE_END(invalid_TSS)

SYM_CODE_START(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
SYM_CODE_END(segment_not_present)

SYM_CODE_START(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
SYM_CODE_END(stack_segment)

SYM_CODE_START(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
SYM_CODE_END(alignment_check)

SYM_CODE_START(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
SYM_CODE_END(divide_error)

#ifdef CONFIG_X86_MCE
SYM_CODE_START(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
SYM_CODE_END(machine_check)
#endif

SYM_CODE_START(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
SYM_CODE_END(spurious_interrupt_bug)

#ifdef CONFIG_XEN_PV
SYM_FUNC_START(xen_hypervisor_callback)
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	cmpl	$xen_iret_start_crit, (%esp)
	jb	1f
	cmpl	$xen_iret_end_crit, (%esp)
	jae	1f
	call	xen_iret_crit_fixup
1:
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPTION
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
SYM_FUNC_END(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
SYM_FUNC_START(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

#ifdef CONFIG_XEN_PVHVM
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)
#endif


#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
		 hv_stimer0_vector_handler)

#endif /* CONFIG_HYPERV */

SYM_CODE_START(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	jmp	common_exception_read_cr2
SYM_CODE_END(page_fault)

SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1

	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	GET_CR2_INTO(%ecx)			# might clobber %eax

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
SYM_CODE_END(common_exception_read_cr2)

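/*
 * Shared slow path for most of the exception stubs in this file: when they
 * jump here the stack top holds the address of the C handler to call, above
 * the error code (pushed by hardware, or a 0/-1 placeholder pushed by the
 * stub).
 */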
SYM_CODE_START_LOCAL_NOALIGN(common_exception)
	/* the function address is in %gs's slot on the stack */
	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* fixup %gs */
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx

	/* fixup orig %eax */
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart

	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
SYM_CODE_END(common_exception)

SYM_CODE_START(debug)
	/*
	 * Entry from sysenter is now handled in common_exception
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_debug
	jmp	common_exception
SYM_CODE_END(debug)

#ifdef CONFIG_DOUBLEFAULT
SYM_CODE_START(double_fault)
1:
	/*
	 * This is a task gate handler, not an interrupt gate handler.
	 * The error code is on the stack, but the stack is otherwise
	 * empty.  Interrupts are off.  Our state is sane with the following
	 * exceptions:
	 *
	 *  - CR0.TS is set.  "TS" literally means "task switched".
	 *  - EFLAGS.NT is set because we're a "nested task".
	 *  - The doublefault TSS has back_link set and has been marked busy.
	 *  - TR points to the doublefault TSS and the normal TSS is busy.
	 *  - CR3 is the normal kernel PGD.  This would be delightful, except
	 *    that the CPU didn't bother to save the old CR3 anywhere.  This
	 *    would make it very awkward to return back to the context we came
	 *    from.
	 *
	 * The rest of EFLAGS is sanitized for us, so we don't need to
	 * worry about AC or DF.
	 *
	 * Don't even bother popping the error code.  It's always zero,
	 * and ignoring it makes us a bit more robust against buggy
	 * hypervisor task gate implementations.
	 *
	 * We will manually undo the task switch instead of doing a
	 * task-switching IRET.
	 */

	clts				/* clear CR0.TS */
	pushl	$X86_EFLAGS_FIXED
	popfl				/* clear EFLAGS.NT */

	call	doublefault_shim

	/* We don't support returning, so we have no IRET here. */
1:
	hlt
	jmp	1b
SYM_CODE_END(double_fault)
#endif

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
SYM_CODE_START(nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	/*
	 * ESPFIX_SS is only ever set on the return to user path
	 * after we've switched to the entry stack.
	 */
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL_NMI cr3_reg=%edi
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp

.Lnmi_return:
#ifdef CONFIG_X86_ESPFIX32
	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
	jnz	.Lnmi_from_espfix
#endif

	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI cr3_reg=%edi pop=4
	jmp	.Lirq_return

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Create the ss:esp far pointer used by 'lss' below to switch back
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)

	/* Copy the (short) IRET frame */
	pushl	4*4(%esp)			# flags
	pushl	4*4(%esp)			# cs
	pushl	4*4(%esp)			# ip

	pushl	%eax				# orig_ax

	SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
	ENCODE_FRAME_POINTER

	/* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
	xorl	$(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)

	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	.Lnmi_from_sysenter_stack

.Lnmi_from_espfix:
	RESTORE_ALL_NMI cr3_reg=%edi
	/*
	 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
	 * fix up the gap and long frame:
	 *
	 * 3 - original frame	(exception)
	 * 2 - ESPFIX block	(above)
	 * 6 - gap		(FIXUP_FRAME)
	 * 5 - long frame	(FIXUP_FRAME)
	 * 1 - orig_ax
	 */
	lss	(1+5+6)*4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
SYM_CODE_END(nmi)

SYM_CODE_START(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
SYM_CODE_END(int3)

SYM_CODE_START(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
SYM_CODE_END(general_protection)

#ifdef CONFIG_KVM_GUEST
SYM_CODE_START(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception_read_cr2
SYM_CODE_END(async_page_fault)
#endif

SYM_CODE_START(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
SYM_CODE_END(rewind_stack_do_exit)