entry_64.S (9e05c864993c5442227f83ae1694a737d7a102ed) vs. entry_64.S (10bcc80e9dbced128e3b4aa86e4737e5486a45d0)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * linux/arch/x86_64/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
8 *

--- 222 unchanged lines hidden ---

231 pushq %rsi /* pt_regs->si */
232 pushq %rdx /* pt_regs->dx */
233 pushq %rcx /* pt_regs->cx */
234 pushq $-ENOSYS /* pt_regs->ax */
235 pushq %r8 /* pt_regs->r8 */
236 pushq %r9 /* pt_regs->r9 */
237 pushq %r10 /* pt_regs->r10 */
238 pushq %r11 /* pt_regs->r11 */
239 pushq %rbx /* pt_regs->rbx */
240 pushq %rbp /* pt_regs->rbp */
241 pushq %r12 /* pt_regs->r12 */
242 pushq %r13 /* pt_regs->r13 */
243 pushq %r14 /* pt_regs->r14 */
244 pushq %r15 /* pt_regs->r15 */
245 UNWIND_HINT_REGS
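	/*
	 * Here all of the remaining general-purpose registers are saved up
	 * front, so the pt_regs frame below is complete (the ->ax slot was
	 * pre-loaded with -ENOSYS above as the default return value) and any
	 * C code reached from this point sees the full register image.
	 */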
239 sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
240 UNWIND_HINT_REGS extra=0
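	/*
	 * Here, by contrast, only stack space is reserved for the bp, bx and
	 * r12-r15 slots; SAVE_EXTRA_REGS fills them in later, and only on the
	 * paths that actually need a complete pt_regs.  The extra=0 form of
	 * the unwind hint tells the unwinder that just this partial layout is
	 * in place so far.
	 */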
246
247 TRACE_IRQS_OFF
248
241
242 TRACE_IRQS_OFF
243
244 /*
245 * If we need to do entry work or if we guess we'll need to do
246 * exit work, go straight to the slow path.
247 */
248 movq PER_CPU_VAR(current_task), %r11
249 testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
250 jnz entry_SYSCALL64_slow_path
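	/*
	 * _TIF_WORK_SYSCALL_ENTRY is the set of syscall-entry work flags
	 * (roughly: ptrace, seccomp, audit and syscall tracepoints), while
	 * _TIF_ALLWORK_MASK covers pending exit work such as signal delivery
	 * or a needed reschedule; if any of those bits is set, the fast path
	 * is skipped entirely.
	 */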
251
252entry_SYSCALL_64_fastpath:
253 /*
254 * Easy case: enable interrupts and issue the syscall. If the syscall
255 * needs pt_regs, we'll call a stub that disables interrupts again
256 * and jumps to the slow path.
257 */
258 TRACE_IRQS_ON
259 ENABLE_INTERRUPTS(CLBR_NONE)
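	/*
	 * ENABLE_INTERRUPTS/DISABLE_INTERRUPTS are the paravirt-aware forms
	 * of sti/cli; the CLBR_* argument names the registers the paravirt
	 * call site is allowed to clobber.
	 */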
260#if __SYSCALL_MASK == ~0
261 cmpq $__NR_syscall_max, %rax
262#else
263 andl $__SYSCALL_MASK, %eax
264 cmpl $__NR_syscall_max, %eax
265#endif
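	/*
	 * __SYSCALL_MASK is ~0 unless the x32 ABI is configured in; with x32
	 * enabled it instead masks off __X32_SYSCALL_BIT so that x32 syscall
	 * numbers index the same sys_call_table range before the bounds check
	 * against __NR_syscall_max.
	 */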
266 ja 1f /* return -ENOSYS (already in pt_regs->ax) */
267 movq %r10, %rcx
268
269 /*
270 * This call instruction is handled specially in stub_ptregs_64.
271 * It might end up jumping to the slow path. If it jumps, RAX
272 * and all argument registers are clobbered.
273 */
274#ifdef CONFIG_RETPOLINE
275 movq sys_call_table(, %rax, 8), %rax
276 call __x86_indirect_thunk_rax
277#else
278 call *sys_call_table(, %rax, 8)
279#endif
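	/*
	 * With CONFIG_RETPOLINE the indirect call is routed through the
	 * __x86_indirect_thunk_rax retpoline so the branch target cannot be
	 * mis-speculated (Spectre v2 mitigation); otherwise the table entry
	 * is called directly.
	 */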
280.Lentry_SYSCALL_64_after_fastpath_call:
281
282 movq %rax, RAX(%rsp)
2831:
284
285 /*
286 * If we get here, then we know that pt_regs is clean for SYSRET64.
287 * If we see that no exit work is required (which we are required
288 * to check with IRQs off), then we can go straight to SYSRET64.
289 */
290 DISABLE_INTERRUPTS(CLBR_ANY)
291 TRACE_IRQS_OFF
292 movq PER_CPU_VAR(current_task), %r11
293 testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
294 jnz 1f
295
296 LOCKDEP_SYS_EXIT
297 TRACE_IRQS_ON /* user mode is traced as IRQs on */
298 movq RIP(%rsp), %rcx
299 movq EFLAGS(%rsp), %r11
300 addq $6*8, %rsp /* skip extra regs -- they were preserved */
301 UNWIND_HINT_EMPTY
302 jmp .Lpop_c_regs_except_rcx_r11_and_sysret
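	/*
	 * The six skipped slots are rbp, rbx and r12-r15.  They are
	 * callee-saved in the C ABI and were never written into the reserved
	 * pt_regs area on this path, so the user's values are still live in
	 * the registers themselves and nothing needs to be popped.
	 */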
303
3041:
305 /*
306 * The fast path looked good when we started, but something changed
307 * along the way and we need to switch to the slow path. Calling
308 * raise(3) will trigger this, for example. IRQs are off.
309 */
310 TRACE_IRQS_ON
311 ENABLE_INTERRUPTS(CLBR_ANY)
312 SAVE_EXTRA_REGS
313 movq %rsp, %rdi
314 call syscall_return_slowpath /* returns with IRQs disabled */
315 jmp return_from_SYSCALL_64
316
317entry_SYSCALL64_slow_path:
249 /* IRQs are off. */
318 /* IRQs are off. */
319 SAVE_EXTRA_REGS
250 movq %rsp, %rdi
251 call do_syscall_64 /* returns with IRQs disabled */
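	/*
	 * do_syscall_64() takes the pt_regs pointer in %rdi; in this kernel
	 * it reads the syscall number from regs->orig_ax, bounds-checks it,
	 * dispatches through sys_call_table, stores the return value in
	 * regs->ax and performs the exit work before returning with IRQs
	 * disabled.
	 */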
252
320 movq %rsp, %rdi
321 call do_syscall_64 /* returns with IRQs disabled */
322
323return_from_SYSCALL_64:
253 TRACE_IRQS_IRETQ /* we're about to change IF */
254
255 /*
256 * Try to use SYSRET instead of IRET if we're returning to
257 * a completely clean 64-bit userspace context. If we're not,
258 * go to the slow exit path.
259 */
260 movq RCX(%rsp), %rcx
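	/*
	 * SYSRET64 takes the return RIP from %rcx and RFLAGS from %r11 and
	 * does not reload SS or RSP from the saved frame, so it can only be
	 * used when the saved user state already matches that shape;
	 * everything else has to leave through the IRET path.
	 */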

--- 56 unchanged lines hidden ---

317 /*
318 * We win! This label is here just for ease of understanding
319 * perf profiles. Nothing jumps here.
320 */
321syscall_return_via_sysret:
322 /* rcx and r11 are already restored (see code above) */
323 UNWIND_HINT_EMPTY
324 POP_EXTRA_REGS
324 TRACE_IRQS_IRETQ /* we're about to change IF */
325
326 /*
327 * Try to use SYSRET instead of IRET if we're returning to
328 * a completely clean 64-bit userspace context. If we're not,
329 * go to the slow exit path.
330 */
331 movq RCX(%rsp), %rcx

--- 56 unchanged lines hidden ---

388 /*
389 * We win! This label is here just for ease of understanding
390 * perf profiles. Nothing jumps here.
391 */
392syscall_return_via_sysret:
393 /* rcx and r11 are already restored (see code above) */
394 UNWIND_HINT_EMPTY
395 POP_EXTRA_REGS
396.Lpop_c_regs_except_rcx_r11_and_sysret:
325 popq %rsi /* skip r11 */
326 popq %r10
327 popq %r9
328 popq %r8
329 popq %rax
330 popq %rsi /* skip rcx */
331 popq %rdx
332 popq %rsi

--- 14 unchanged lines hidden ---

347 */
348 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
349
350 popq %rdi
351 popq %rsp
352 USERGS_SYSRET64
353END(entry_SYSCALL_64)
354
397 popq %rsi /* skip r11 */
398 popq %r10
399 popq %r9
400 popq %r8
401 popq %rax
402 popq %rsi /* skip rcx */
403 popq %rdx
404 popq %rsi

--- 14 unchanged lines hidden ---

419 */
420 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
421
422 popq %rdi
423 popq %rsp
424 USERGS_SYSRET64
425END(entry_SYSCALL_64)
426
427ENTRY(stub_ptregs_64)
428 /*
429 * Syscalls marked as needing ptregs land here.
430 * If we are on the fast path, we need to save the extra regs,
431 * which we achieve by trying again on the slow path. If we are on
432 * the slow path, the extra regs are already saved.
433 *
434 * RAX stores a pointer to the C function implementing the syscall.
435 * IRQs are on.
436 */
437 cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
438 jne 1f
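	/*
	 * (%rsp) is the return address pushed by the call that brought us
	 * here; if it equals the label planted right after the fast path's
	 * table call, the caller was the fast path and the syscall must be
	 * redone via the slow path to get a full pt_regs.
	 */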
439
440 /*
441 * Called from fast path -- disable IRQs again, pop return address
442 * and jump to slow path
443 */
444 DISABLE_INTERRUPTS(CLBR_ANY)
445 TRACE_IRQS_OFF
446 popq %rax
447 UNWIND_HINT_REGS extra=0
448 jmp entry_SYSCALL64_slow_path
449
4501:
451 JMP_NOSPEC %rax /* Called from C */
452END(stub_ptregs_64)
453
454.macro ptregs_stub func
455ENTRY(ptregs_\func)
456 UNWIND_HINT_FUNC
457 leaq \func(%rip), %rax
458 jmp stub_ptregs_64
459END(ptregs_\func)
460.endm
461
462/* Instantiate ptregs_stub for each ptregs-using syscall */
463#define __SYSCALL_64_QUAL_(sym)
464#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
465#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
466#include <asm/syscalls_64.h>
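	/*
	 * For every syscall tagged with the "ptregs" qualifier in
	 * syscall_64.tbl (clone, fork, execve and friends), the include above
	 * emits one such stub.  As an illustration, the entry for sys_clone
	 * would expand to roughly:
	 *
	 *	ENTRY(ptregs_sys_clone)
	 *		UNWIND_HINT_FUNC
	 *		leaq	sys_clone(%rip), %rax
	 *		jmp	stub_ptregs_64
	 *	END(ptregs_sys_clone)
	 *
	 * and the syscall table then points at ptregs_sys_clone rather than
	 * at sys_clone itself.
	 */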
467
355/*
356 * %rdi: prev task
357 * %rsi: next task
358 */
359ENTRY(__switch_to_asm)
360 UNWIND_HINT_FUNC
361 /*
362 * Save callee-saved registers

--- 323 unchanged lines hidden ---

686 testb $3, CS(%rsp)
687 jz 1f
688 ud2
6891:
690#endif
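	/*
	 * The low two bits of the saved CS are the privilege level of the
	 * context being returned to; a nonzero value here would mean this
	 * kernel-return path is about to IRET to user mode, which would be a
	 * bug, hence the ud2 trap above.
	 */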
691 POP_EXTRA_REGS
692 POP_C_REGS
693 addq $8, %rsp /* skip regs->orig_ax */
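	/*
	 * The eight bytes skipped here are regs->orig_ax, which holds the
	 * syscall number or the exception's error information rather than
	 * the value of a general-purpose register, so nothing needs to be
	 * restored from that slot.
	 */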
468/*
469 * %rdi: prev task
470 * %rsi: next task
471 */
472ENTRY(__switch_to_asm)
473 UNWIND_HINT_FUNC
474 /*
475 * Save callee-saved registers

--- 323 unchanged lines hidden ---

799 testb $3, CS(%rsp)
800 jz 1f
801 ud2
8021:
803#endif
804 POP_EXTRA_REGS
805 POP_C_REGS
806 addq $8, %rsp /* skip regs->orig_ax */
807 /*
 808 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
 809 * when returning from an IPI handler.
810 */
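	/*
	 * Background: the membarrier() SYNC_CORE command IPIs other CPUs and
	 * relies on each of them executing a core-serializing instruction
	 * before running the affected process again; IRET is such an
	 * instruction, which is why this exit must remain an IRET rather than
	 * a faster, non-serializing return.
	 */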
694 INTERRUPT_RETURN
695
696ENTRY(native_iret)
697 UNWIND_HINT_IRET_REGS
698 /*
699 * Are we returning to a stack segment from the LDT? Note: in
700 * 64-bit mode SS:RSP on the exception stack is always valid.
701 */

--- 996 unchanged lines hidden ---
811 INTERRUPT_RETURN
812
813ENTRY(native_iret)
814 UNWIND_HINT_IRET_REGS
815 /*
816 * Are we returning to a stack segment from the LDT? Note: in
817 * 64-bit mode SS:RSP on the exception stack is always valid.
818 */

--- 996 unchanged lines hidden ---