Lines Matching +full:8 +full:a
13 * A note on terminology:
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
125 * a completely clean 64-bit userspace context. If we're not,
200 * When switching from a shallower to a deeper call stack
221 * A newly forked process directly context switches into this address.
230 * This is the start of the kernel stack; even though there's a
234 * This ensures stack unwinds of kernel threads terminate in a known
249 * -- at this point the register set should be a valid user set
279 ENCODE_FRAME_POINTER 8
312 /* For some configurations \cfunc ends up being a noreturn. */
334 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
336 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
349 * If coming from kernel space, create a 6-word gap to allow the
350 * int3 handler to emulate a call instruction.
355 pushq 5*8(%rsp)
357 UNWIND_HINT_IRET_REGS offset=8
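
The gap described at lines 349-355 exists so the #BP handler can rewrite the saved frame to fake a CALL. A minimal userspace-style sketch of that idea, with made-up types, names and values (the real kernel helpers live elsewhere and differ in detail):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified exception frame: only what the sketch needs. */
    struct fake_regs {
            uint64_t ip;    /* address right after the int3 byte        */
            uint64_t sp;    /* stack pointer saved at exception entry   */
    };

    /*
     * Emulate "call target" from an int3 handler: push a return address into
     * the slack the entry code reserved, then redirect ip.  A 5-byte call was
     * replaced by a 1-byte int3, so the return address is ip - 1 + 5.
     */
    static void emulate_call(struct fake_regs *regs, uint64_t target,
                             uint64_t stack[])
    {
            uint64_t ret = regs->ip - 1 + 5;

            regs->sp -= 8;                  /* consume one word of the gap */
            stack[regs->sp / 8] = ret;      /* "push" the return address   */
            regs->ip = target;              /* "jump" to the call target   */
    }

    int main(void)
    {
            uint64_t stack[16] = { 0 };
            struct fake_regs regs = { .ip = 0x1001, .sp = 8 * 8 };

            emulate_call(&regs, 0x2000, stack);
            printf("ip=%#llx sp=%llu ret=%#llx\n",
                   (unsigned long long)regs.ip,
                   (unsigned long long)regs.sp,
                   (unsigned long long)stack[regs.sp / 8]);
            return 0;
    }
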
374 * common_interrupt is a hotpath, align it to a cache line
408 * a normal entry.
449 * will switch to a fall-back stack (VC2) and call a special handler function.
463 * a normal entry.
520 UNWIND_HINT_IRET_ENTRY offset=8
533 /* For some configurations \cfunc ends up being a noreturn. */
571 add $8, %rsp /* orig_ax */
578 testb $3, 8(%rsp)
595 pushq 6*8(%rdi) /* SS */
596 pushq 5*8(%rdi) /* RSP */
597 pushq 4*8(%rdi) /* EFLAGS */
598 pushq 3*8(%rdi) /* CS */
599 pushq 2*8(%rdi) /* RIP */
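
The five pushes at lines 595-599 rebuild a hardware interrupt return frame from a saved copy; pushing SS first and RIP last reproduces the layout the CPU expects, with RIP at the lowest address. A minimal sketch of that layout (the struct name is illustrative; the base register bias is specific to that code path):

    #include <stdint.h>

    /* The 64-bit hardware iret frame, lowest address first. */
    struct iret_frame {
            uint64_t rip;           /* copied from 2*8(%rdi) above */
            uint64_t cs;            /* copied from 3*8(%rdi)       */
            uint64_t rflags;        /* copied from 4*8(%rdi)       */
            uint64_t rsp;           /* copied from 5*8(%rdi)       */
            uint64_t ss;            /* copied from 6*8(%rdi)       */
    };

    _Static_assert(sizeof(struct iret_frame) == 5 * 8,
                   "five 8-byte slots, matching the five pushes");
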
628 addq $8, %rsp /* skip regs->orig_ax */
643 * Are we returning to a stack segment from the LDT? Note: in
665 * values. We have a percpu ESPFIX stack that is eight slots
689 movq %rax, (0*8)(%rdi) /* user RAX */
690 movq (1*8)(%rsp), %rax /* user RIP */
691 movq %rax, (1*8)(%rdi)
692 movq (2*8)(%rsp), %rax /* user CS */
693 movq %rax, (2*8)(%rdi)
694 movq (3*8)(%rsp), %rax /* user RFLAGS */
695 movq %rax, (3*8)(%rdi)
696 movq (5*8)(%rsp), %rax /* user SS */
697 movq %rax, (5*8)(%rdi)
698 movq (4*8)(%rsp), %rax /* user RSP */
699 movq %rax, (4*8)(%rdi)
706 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
719 UNWIND_HINT_IRET_REGS offset=8
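
The copies and the comment at lines 689-706 are part of the ESPFIX64 path: the iret frame is duplicated onto a per-CPU espfix area, and the new RSP is formed by the OR described at line 706. A small sketch of that arithmetic, with made-up values, assuming espfix_stack has bits 31:16 clear (which is what lets the OR work):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical per-CPU values, for illustration only. */
            uint64_t espfix_stack = 0xffffff0000002fc0ULL;  /* bits 31:16 clear   */
            uint64_t x            = 0x00000000dead1234ULL;  /* "X" in the comment */

            /* (espfix_stack | (X & 0xffff0000)) lands on the read-only alias */
            uint64_t new_rsp = espfix_stack | (x & 0xffff0000ULL);

            printf("new rsp = %#llx\n", (unsigned long long)new_rsp);
            return 0;
    }
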
761 /* This can't be a string because the preprocessor needs to see it. */
777 * A note on the "critical region" in our callback handler.
838 movq 8(%rsp), %r11
841 UNWIND_HINT_IRET_REGS offset=8
845 movq 8(%rsp), %r11
848 pushq $-1 /* orig_ax = -1 => not a system call */
872 ENCODE_FRAME_POINTER 8
881 * hardware at entry) can not be used: this may be a return
882 * to kernel code, but with a user CR3 value.
887 * be retrieved from a kernel internal table.
907 * loads based on a mispredicted GS base can happen, therefore no LFENCE
918 * The kernel-enforced convention is a negative GSBASE indicates
919 * a kernel value. No SWAPGS needed on entry and exit.
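
The convention quoted at lines 918-919 means entry code can tell a kernel GSBASE from a user one by its sign alone. A trivial sketch, with illustrative example values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Negative when read as a signed 64-bit value => a kernel GSBASE. */
    static bool gsbase_is_kernel(uint64_t gsbase)
    {
            return (int64_t)gsbase < 0;
    }

    int main(void)
    {
            printf("%d %d\n",
                   gsbase_is_kernel(0xffff888000000000ULL),   /* kernel-style */
                   gsbase_is_kernel(0x00007f1234560000ULL));  /* user-style   */
            return 0;
    }
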
935 * CR3 above, keep the old value in a callee saved register.
996 /* We are returning to a context with user GSBASE */
1009 ENCODE_FRAME_POINTER 8
1011 testb $3, CS+8(%rsp)
1025 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1031 * usergs. Handle them here. B stepping K8s sometimes report a
1037 cmpq %rcx, RIP+8(%rsp)
1040 cmpq %rax, RIP+8(%rsp)
1042 cmpq $.Lgs_change, RIP+8(%rsp)
1053 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1059 leaq 8(%rsp), %rax /* return pt_regs pointer */
1065 movq %rcx, RIP+8(%rsp)
1083 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1110 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1119 * Check a special location on the stack that contains a
1137 * a nested NMI that updated the copy interrupt stack frame, a
1142 * with a single IRET instruction. Similarly, IRET to user mode
1153 testb $3, CS-RIP+8(%rsp)
1172 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1173 pushq 5*8(%rdx) /* pt_regs->ss */
1174 pushq 4*8(%rdx) /* pt_regs->rsp */
1175 pushq 3*8(%rdx) /* pt_regs->flags */
1176 pushq 2*8(%rdx) /* pt_regs->cs */
1177 pushq 1*8(%rdx) /* pt_regs->rip */
1217 * | iret RFLAGS } by a nested NMI to force another |
1244 * Determine whether we're a nested NMI.
1247 * end_repeat_nmi, then we are a nested NMI. We must not
1255 cmpq 8(%rsp), %rdx
1258 cmpq 8(%rsp), %rdx
1267 cmpl $1, -8(%rsp)
1277 * pull a fast one on naughty userspace, though: we program
1282 lea 6*8(%rsp), %rdx
1283 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1284 cmpq %rdx, 4*8(%rsp)
1285 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1289 cmpq %rdx, 4*8(%rsp)
1290 /* If it is below the NMI stack, it is a normal NMI */
1295 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1298 /* This is a nested NMI. */
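
In C, the range check at lines 1282-1298 reads roughly as below. Names are illustrative; the DF test works as a tiebreaker because, per the comment around line 1277, SYSCALL is programmed to mask DF, so userspace that parks RSP on the NMI stack cannot also arrive with DF set:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Nested only if the interrupted RSP lies within the NMI stack AND DF was
     * set in the saved RFLAGS; a clear DF means RSP was user controlled.
     */
    static bool nmi_is_nested(uint64_t prev_rsp, uint64_t nmi_stack_top,
                              uint64_t nmi_stack_size, bool df_set)
    {
            if (prev_rsp > nmi_stack_top)                   /* above the NMI stack */
                    return false;
            if (prev_rsp < nmi_stack_top - nmi_stack_size)  /* below the NMI stack */
                    return false;
            return df_set;          /* !DF => treat as a first, normal NMI */
    }
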
1305 subq $8, %rsp
1306 leaq -10*8(%rsp), %rdx
1314 addq $(6*8), %rsp
1319 /* We are returning to kernel mode, so this cannot result in a fault. */
1330 subq $(5*8), %rsp
1334 pushq 11*8(%rsp)
1346 pushq %rsp /* RSP (minus 8 because of the previous push) */
1347 addq $8, (%rsp) /* Fix up RSP */
1359 * If there was a nested NMI, the first NMI's iret will return
1364 * This makes it safe to copy to the stack frame that a nested
1373 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1380 addq $(10*8), %rsp
1382 pushq -6*8(%rsp)
1384 subq $(5*8), %rsp
1389 * Everything below this point can be preempted by a nested NMI.
1441 addq $6*8, %rsp
1453 movq $0, 5*8(%rsp) /* clear "NMI executing" */
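
The writes at lines 1373 and 1453 set and clear the same "NMI executing" slot; the differing offsets only reflect how far RSP has moved in between. Reconstructed from the layout diagram the fragment at line 1217 belongs to, the scratch area above pt_regs looks roughly like this (names illustrative, lowest address first):

    #include <stdint.h>

    struct nmi_scratch {
            uint64_t outermost[5];  /* RIP,CS,RFLAGS,RSP,SS saved in first_nmi  */
            uint64_t iret[5];       /* frame the handler IRETs from; a nested   */
                                    /* NMI rewrites it to force another pass    */
            uint64_t nmi_executing; /* nonzero while an NMI is being handled    */
            /* the temp rdx slot and the original hardware frame sit above this */
    };
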
1462 * iretq reads the "iret" frame and exits the NMI stack in a
1464 * cannot result in a fault. Similarly, we don't need to worry
1503 * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
1529 * that all RETs are in the second half of a cacheline to mitigate Indirect