17c478bd9Sstevel@tonic-gate/* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 59acbbeafSnn35248 * Common Development and Distribution License (the "License"). 69acbbeafSnn35248 * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate/* 22eb5a5c78SSurya Prakki * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 236ba2dbf5SRobert Mustacchi * Copyright (c) 2012, Joyent, Inc. All rights reserved. 
247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate#include <sys/asm_linkage.h> 277c478bd9Sstevel@tonic-gate#include <sys/asm_misc.h> 287c478bd9Sstevel@tonic-gate#include <sys/regset.h> 29ae115bc7Smrj#include <sys/privregs.h> 307c478bd9Sstevel@tonic-gate#include <sys/psw.h> 319acbbeafSnn35248#include <sys/machbrand.h> 327c478bd9Sstevel@tonic-gate 337c478bd9Sstevel@tonic-gate#if defined(__lint) 347c478bd9Sstevel@tonic-gate 357c478bd9Sstevel@tonic-gate#include <sys/types.h> 367c478bd9Sstevel@tonic-gate#include <sys/thread.h> 377c478bd9Sstevel@tonic-gate#include <sys/systm.h> 387c478bd9Sstevel@tonic-gate 397c478bd9Sstevel@tonic-gate#else /* __lint */ 407c478bd9Sstevel@tonic-gate 417c478bd9Sstevel@tonic-gate#include <sys/segments.h> 427c478bd9Sstevel@tonic-gate#include <sys/pcb.h> 437c478bd9Sstevel@tonic-gate#include <sys/trap.h> 447c478bd9Sstevel@tonic-gate#include <sys/ftrace.h> 457c478bd9Sstevel@tonic-gate#include <sys/traptrace.h> 467c478bd9Sstevel@tonic-gate#include <sys/clock.h> 477c478bd9Sstevel@tonic-gate#include <sys/model.h> 487c478bd9Sstevel@tonic-gate#include <sys/panic.h> 49843e1988Sjohnlev 50843e1988Sjohnlev#if defined(__xpv) 51843e1988Sjohnlev#include <sys/hypervisor.h> 52843e1988Sjohnlev#endif 53843e1988Sjohnlev 547c478bd9Sstevel@tonic-gate#include "assym.h" 557c478bd9Sstevel@tonic-gate 567c478bd9Sstevel@tonic-gate#endif /* __lint */ 577c478bd9Sstevel@tonic-gate 587c478bd9Sstevel@tonic-gate/* 597c478bd9Sstevel@tonic-gate * We implement five flavours of system call entry points 607c478bd9Sstevel@tonic-gate * 617c478bd9Sstevel@tonic-gate * - syscall/sysretq (amd64 generic) 627c478bd9Sstevel@tonic-gate * - syscall/sysretl (i386 plus SYSC bit) 637c478bd9Sstevel@tonic-gate * - sysenter/sysexit (i386 plus SEP bit) 647c478bd9Sstevel@tonic-gate * - int/iret (i386 generic) 657c478bd9Sstevel@tonic-gate * - lcall/iret (i386 generic) 667c478bd9Sstevel@tonic-gate * 677c478bd9Sstevel@tonic-gate * The current libc included in 
Solaris uses int/iret as the base unoptimized 687c478bd9Sstevel@tonic-gate * kernel entry method. Older libc implementations and legacy binaries may use 697c478bd9Sstevel@tonic-gate * the lcall call gate, so it must continue to be supported. 707c478bd9Sstevel@tonic-gate * 717c478bd9Sstevel@tonic-gate * System calls that use an lcall call gate are processed in trap() via a 727c478bd9Sstevel@tonic-gate * segment-not-present trap, i.e. lcalls are extremely slow(!). 737c478bd9Sstevel@tonic-gate * 747c478bd9Sstevel@tonic-gate * The basic pattern used in the 32-bit SYSC handler at this point in time is 757c478bd9Sstevel@tonic-gate * to have the bare minimum of assembler, and get to the C handlers as 767c478bd9Sstevel@tonic-gate * quickly as possible. 777c478bd9Sstevel@tonic-gate * 787c478bd9Sstevel@tonic-gate * The 64-bit handler is much closer to the sparcv9 handler; that's 797c478bd9Sstevel@tonic-gate * because of passing arguments in registers. The 32-bit world still 807c478bd9Sstevel@tonic-gate * passes arguments on the stack -- that makes that handler substantially 817c478bd9Sstevel@tonic-gate * more complex. 827c478bd9Sstevel@tonic-gate * 837c478bd9Sstevel@tonic-gate * The two handlers share a few code fragments which are broken 847c478bd9Sstevel@tonic-gate * out into preprocessor macros below. 857c478bd9Sstevel@tonic-gate * 867c478bd9Sstevel@tonic-gate * XX64 come back and speed all this up later. The 32-bit stuff looks 877c478bd9Sstevel@tonic-gate * especially easy to speed up the argument copying part .. 887c478bd9Sstevel@tonic-gate * 897c478bd9Sstevel@tonic-gate * 907c478bd9Sstevel@tonic-gate * Notes about segment register usage (c.f. the 32-bit kernel) 917c478bd9Sstevel@tonic-gate * 927c478bd9Sstevel@tonic-gate * In the 32-bit kernel, segment registers are dutifully saved and 937c478bd9Sstevel@tonic-gate * restored on all mode transitions because the kernel uses them directly. 
947c478bd9Sstevel@tonic-gate * When the processor is running in 64-bit mode, segment registers are 957c478bd9Sstevel@tonic-gate * largely ignored. 967c478bd9Sstevel@tonic-gate * 977c478bd9Sstevel@tonic-gate * %cs and %ss 987c478bd9Sstevel@tonic-gate * controlled by the hardware mechanisms that make mode transitions 997c478bd9Sstevel@tonic-gate * 1007c478bd9Sstevel@tonic-gate * The remaining segment registers have to either be pointing at a valid 1017c478bd9Sstevel@tonic-gate * descriptor i.e. with the 'present' bit set, or they can NULL descriptors 1027c478bd9Sstevel@tonic-gate * 1037c478bd9Sstevel@tonic-gate * %ds and %es 1047c478bd9Sstevel@tonic-gate * always ignored 1057c478bd9Sstevel@tonic-gate * 1067c478bd9Sstevel@tonic-gate * %fs and %gs 1077c478bd9Sstevel@tonic-gate * fsbase and gsbase are used to control the place they really point at. 1087c478bd9Sstevel@tonic-gate * The kernel only depends on %gs, and controls its own gsbase via swapgs 1097c478bd9Sstevel@tonic-gate * 1107c478bd9Sstevel@tonic-gate * Note that loading segment registers is still costly because the GDT 1117c478bd9Sstevel@tonic-gate * lookup still happens (this is because the hardware can't know that we're 1127c478bd9Sstevel@tonic-gate * not setting up these segment registers for a 32-bit program). Thus we 1137c478bd9Sstevel@tonic-gate * avoid doing this in the syscall path, and defer them to lwp context switch 1147c478bd9Sstevel@tonic-gate * handlers, so the register values remain virtualized to the lwp. 
 */

#if defined(SYSCALLTRACE)
/* OR the global syscalltrace flag into r32; compiles to nothing otherwise. */
#define	ORL_SYSCALLTRACE(r32)		\
	orl	syscalltrace(%rip), r32
#else
#define	ORL_SYSCALLTRACE(r32)
#endif

/*
 * In the 32-bit kernel, we do absolutely nothing before getting into the
 * brand callback checks.  In 64-bit land, we do swapgs and then come here.
 * We assume that the %rsp- and %r15-stashing fields in the CPU structure
 * are still unused.
 *
 * Check if a brand_mach_ops callback is defined for the specified callback_id
 * type.  If so invoke it with the kernel's %gs value loaded and the following
 * data on the stack:
 *
 *	   stack:  --------------------------------------
 *		32 | callback pointer			|
 *	    |	24 | user (or interrupt) stack pointer	|
 *	    |	16 | lwp pointer			|
 *	    v	 8 | userland return address		|
 *		 0 | callback wrapper return addr	|
 *	           --------------------------------------
 *
 * Since we're pushing the userland return address onto the kernel stack
 * we need to get that address without accessing the user's stack (since we
 * can't trust that data).  There are different ways to get the userland
 * return address depending on how the syscall trap was made:
 *
 * a) For sys_syscall and sys_syscall32 the return address is in %rcx.
 * b) For sys_sysenter the return address is in %rdx.
 * c) For sys_int80 and sys_syscall_int (int91), upon entry into the macro,
 *    the stack pointer points at the state saved when we took the interrupt:
 *	 ------------------------
 *    |  | user's %ss		|
 *    |  | user's %esp		|
 *    |  | EFLAGS register	|
 *    v  | user's %cs		|
 *       | user's %eip		|
 *	 ------------------------
 *
 * The 2nd parameter to the BRAND_CALLBACK macro is either the
 * BRAND_URET_FROM_REG or BRAND_URET_FROM_INTR_STACK macro.  These macros are
 * used to generate the proper code to get the userland return address for
 * each syscall entry point.
 *
 * The interface to the brand callbacks on the 64-bit kernel assumes %r15
 * is available as a scratch register within the callback.  If the callback
 * returns within the kernel then this macro will restore %r15.  If the
 * callback is going to return directly to userland then it should restore
 * %r15 before returning to userland.
 */

/* Userland return address is already in a register (case a/b above). */
#define BRAND_URET_FROM_REG(rip_reg)					\
	pushq	rip_reg			/* push the return address */

/*
 * The interrupt stack pointer we saved on entry to the BRAND_CALLBACK macro
 * is currently pointing at the user return address (%eip).
 */
#define BRAND_URET_FROM_INTR_STACK()					\
	movq	%gs:CPU_RTMP_RSP, %r15	/* grab the intr. stack pointer */ ;\
	pushq	(%r15)			/* push the return address */

#define BRAND_CALLBACK(callback_id, push_userland_ret)			\
	movq	%rsp, %gs:CPU_RTMP_RSP	/* save the stack pointer */	;\
	movq	%r15, %gs:CPU_RTMP_R15	/* save %r15 */			;\
	movq	%gs:CPU_THREAD, %r15	/* load the thread pointer */	;\
	movq	T_STACK(%r15), %rsp	/* switch to the kernel stack */ ;\
	subq	$16, %rsp		/* save space for 2 pointers */	;\
	pushq	%r14			/* save %r14 */			;\
	movq	%gs:CPU_RTMP_RSP, %r14					;\
	movq	%r14, 8(%rsp)		/* stash the user stack pointer */ ;\
	popq	%r14			/* restore %r14 */		;\
	movq	T_LWP(%r15), %r15	/* load the lwp pointer */	;\
	pushq	%r15			/* push the lwp pointer */	;\
	movq	LWP_PROCP(%r15), %r15	/* load the proc pointer */	;\
	movq	P_BRAND(%r15), %r15	/* load the brand pointer */	;\
	movq	B_MACHOPS(%r15), %r15	/* load the machops pointer */	;\
	movq	_CONST(_MUL(callback_id, CPTRSIZE))(%r15), %r15		;\
	cmpq	$0, %r15		/* NULL means no callback */	;\
	je	1f							;\
	movq	%r15, 16(%rsp)		/* save the callback pointer */	;\
	push_userland_ret		/* push the return address */	;\
	call	*24(%rsp)		/* call callback */		;\
1:	movq	%gs:CPU_RTMP_R15, %r15	/* restore %r15 */		;\
	movq	%gs:CPU_RTMP_RSP, %rsp	/* restore the stack pointer */

/*
 * Account an lwp microstate transition: calls syscall_mstate(from, to).
 * NOTE(review): being a C call, this clobbers caller-saved registers —
 * callers reload anything live (e.g. %rax) afterwards.
 */
#define	MSTATE_TRANSITION(from, to)		\
	movl	$from, %edi;			\
	movl	$to, %esi;			\
	call	syscall_mstate

/*
 * Check to see if a simple (direct) return is possible i.e.
 *
 *	if (t->t_post_sys_ast | syscalltrace |
 *	    lwp->lwp_pcb.pcb_rupdate == 1)
 *		do full version	;
 *
 * Preconditions:
 * -	t is curthread
 * Postconditions:
 * -	condition code NE is set if post-sys is too complex
 * -	rtmp is zeroed if it isn't (we rely on this!)
 * -	ltmp is smashed
 */
#define	CHECK_POSTSYS_NE(t, ltmp, rtmp)			\
	movq	T_LWP(t), ltmp;				\
	movzbl	PCB_RUPDATE(ltmp), rtmp;		\
	ORL_SYSCALLTRACE(rtmp);				\
	orl	T_POST_SYS_AST(t), rtmp;		\
	cmpl	$0, rtmp

/*
 * Fix up the lwp, thread, and eflags for a successful return
 *
 * Preconditions:
 * -	zwreg contains zero
 */
#define	SIMPLE_SYSCALL_POSTSYS(t, lwp, zwreg)		\
	movb	$LWP_USER, LWP_STATE(lwp);		\
	movw	zwreg, T_SYSNUM(t);			\
	andb	$_CONST(0xffff - PS_C), REGOFF_RFL(%rsp)

/*
 * ASSERT(lwptoregs(lwp) == rp);
 *
 * This may seem obvious, but very odd things happen if this
 * assertion is false
 *
 * Preconditions:
 *	(%rsp is ready for normal call sequence)
 * Postconditions (if assertion is true):
 *	%r11 is smashed
 *
 * ASSERT(rp->r_cs == descnum)
 *
 * The code selector is written into the regs structure when the
 * lwp stack is created.  We use this ASSERT to validate that
 * the regs structure really matches how we came in.
 *
 * Preconditions:
 *	(%rsp is ready for normal call sequence)
 * Postconditions (if assertion is true):
 *	-none-
 *
 * ASSERT(lwp->lwp_pcb.pcb_rupdate == 0);
 *
 * If this is false, it meant that we returned to userland without
 * updating the segment registers as we were supposed to.
 *
 * Note that we must ensure no interrupts or other traps intervene
 * between entering privileged mode and performing the assertion,
 * otherwise we may perform a context switch on the thread, which
 * will end up setting pcb_rupdate to 1 again.
 */
#if defined(DEBUG)

#if !defined(__lint)

__lwptoregs_msg:
	.string	"syscall_asm_amd64.s:%d lwptoregs(%p) [%p] != rp [%p]"

__codesel_msg:
	.string	"syscall_asm_amd64.s:%d rp->r_cs [%ld] != %ld"

__no_rupdate_msg:
	.string	"syscall_asm_amd64.s:%d lwp %p, pcb_rupdate != 0"

#endif	/* !__lint */

#define	ASSERT_LWPTOREGS(lwp, rp)			\
	movq	LWP_REGS(lwp), %r11;			\
	cmpq	rp, %r11;				\
	je	7f;					\
	leaq	__lwptoregs_msg(%rip), %rdi;		\
	movl	$__LINE__, %esi;			\
	movq	lwp, %rdx;				\
	movq	%r11, %rcx;				\
	movq	rp, %r8;				\
	xorl	%eax, %eax;				\
	call	panic;					\
7:

#define	ASSERT_NO_RUPDATE_PENDING(lwp)			\
	testb	$0x1, PCB_RUPDATE(lwp);			\
	je	8f;					\
	movq	lwp, %rdx;				\
	leaq	__no_rupdate_msg(%rip), %rdi;		\
	movl	$__LINE__, %esi;			\
	xorl	%eax, %eax;				\
	call	panic;					\
8:

#else
/* Non-DEBUG kernels: the assertions compile to nothing. */
#define	ASSERT_LWPTOREGS(lwp, rp)
#define	ASSERT_NO_RUPDATE_PENDING(lwp)
#endif

/*
 * Do the traptrace thing and restore any registers we used
 * in situ.  Assumes that %rsp is pointing at the base of
 * the struct regs, obviously ..
 */
#ifdef TRAPTRACE
#define	SYSCALL_TRAPTRACE(ttype)				\
	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, ttype);		\
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx);			\
	TRACE_STAMP(%rdi);	/* rdtsc clobbers %eax, %edx */	\
	movq	REGOFF_RAX(%rsp), %rax;				\
	movq	REGOFF_RBX(%rsp), %rbx;				\
	movq	REGOFF_RCX(%rsp), %rcx;				\
	movq	REGOFF_RDX(%rsp), %rdx;				\
	movl	%eax, TTR_SYSNUM(%rdi);				\
	movq	REGOFF_RDI(%rsp), %rdi

#define	SYSCALL_TRAPTRACE32(ttype)				\
	SYSCALL_TRAPTRACE(ttype);				\
	/* paranoia: clean the top 32-bits of the registers */	\
	orl	%eax, %eax;					\
	orl	%ebx, %ebx;					\
	orl	%ecx, %ecx;					\
	orl	%edx, %edx;					\
	orl	%edi, %edi
#else	/* TRAPTRACE */
#define	SYSCALL_TRAPTRACE(ttype)
#define	SYSCALL_TRAPTRACE32(ttype)
#endif	/* TRAPTRACE */

/*
 * The 64-bit libc syscall wrapper does this:
 *
3507c478bd9Sstevel@tonic-gate * fn(<args>) 3517c478bd9Sstevel@tonic-gate * { 3527c478bd9Sstevel@tonic-gate * movq %rcx, %r10 -- because syscall smashes %rcx 3537c478bd9Sstevel@tonic-gate * movl $CODE, %eax 3547c478bd9Sstevel@tonic-gate * syscall 3557c478bd9Sstevel@tonic-gate * <error processing> 3567c478bd9Sstevel@tonic-gate * } 3577c478bd9Sstevel@tonic-gate * 3587c478bd9Sstevel@tonic-gate * Thus when we come into the kernel: 3597c478bd9Sstevel@tonic-gate * 3607c478bd9Sstevel@tonic-gate * %rdi, %rsi, %rdx, %r10, %r8, %r9 contain first six args 3617c478bd9Sstevel@tonic-gate * %rax is the syscall number 3627c478bd9Sstevel@tonic-gate * %r12-%r15 contain caller state 3637c478bd9Sstevel@tonic-gate * 3647c478bd9Sstevel@tonic-gate * The syscall instruction arranges that: 3657c478bd9Sstevel@tonic-gate * 3667c478bd9Sstevel@tonic-gate * %rcx contains the return %rip 3677c478bd9Sstevel@tonic-gate * %r11d contains bottom 32-bits of %rflags 3687c478bd9Sstevel@tonic-gate * %rflags is masked (as determined by the SFMASK msr) 3697c478bd9Sstevel@tonic-gate * %cs is set to UCS_SEL (as determined by the STAR msr) 3707c478bd9Sstevel@tonic-gate * %ss is set to UDS_SEL (as determined by the STAR msr) 3717c478bd9Sstevel@tonic-gate * %rip is set to sys_syscall (as determined by the LSTAR msr) 3727c478bd9Sstevel@tonic-gate * 3737c478bd9Sstevel@tonic-gate * Or in other words, we have no registers available at all. 3747c478bd9Sstevel@tonic-gate * Only swapgs can save us! 37505979f64Sjohnlev * 37605979f64Sjohnlev * Under the hypervisor, the swapgs has happened already. However, the 37705979f64Sjohnlev * state of the world is very different from that we're familiar with. 37805979f64Sjohnlev * 37905979f64Sjohnlev * In particular, we have a stack structure like that for interrupt 38005979f64Sjohnlev * gates, except that the %cs and %ss registers are modified for reasons 38105979f64Sjohnlev * that are not entirely clear. 
Critically, the %rcx/%r11 values do 38205979f64Sjohnlev * *not* reflect the usage of those registers under a 'real' syscall[1]; 38305979f64Sjohnlev * the stack, therefore, looks like this: 38405979f64Sjohnlev * 38505979f64Sjohnlev * 0x0(rsp) potentially junk %rcx 38605979f64Sjohnlev * 0x8(rsp) potentially junk %r11 38705979f64Sjohnlev * 0x10(rsp) user %rip 38805979f64Sjohnlev * 0x18(rsp) modified %cs 38905979f64Sjohnlev * 0x20(rsp) user %rflags 39005979f64Sjohnlev * 0x28(rsp) user %rsp 39105979f64Sjohnlev * 0x30(rsp) modified %ss 39205979f64Sjohnlev * 39305979f64Sjohnlev * 39405979f64Sjohnlev * and before continuing on, we must load the %rip into %rcx and the 39505979f64Sjohnlev * %rflags into %r11. 39605979f64Sjohnlev * 39705979f64Sjohnlev * [1] They used to, and we relied on it, but this was broken in 3.1.1. 39805979f64Sjohnlev * Sigh. 3997c478bd9Sstevel@tonic-gate */ 40005979f64Sjohnlev#if defined(__xpv) 40105979f64Sjohnlev#define XPV_SYSCALL_PROD \ 40281fd181aSTodd Clayton movq 0x10(%rsp), %rcx; \ 40381fd181aSTodd Clayton movq 0x20(%rsp), %r11; \ 40481fd181aSTodd Clayton movq 0x28(%rsp), %rsp 40505979f64Sjohnlev#else 40605979f64Sjohnlev#define XPV_SYSCALL_PROD /* nothing */ 40705979f64Sjohnlev#endif 40805979f64Sjohnlev 4097c478bd9Sstevel@tonic-gate#if defined(__lint) 4107c478bd9Sstevel@tonic-gate 4117c478bd9Sstevel@tonic-gate/*ARGSUSED*/ 4127c478bd9Sstevel@tonic-gatevoid 4137c478bd9Sstevel@tonic-gatesys_syscall() 4147c478bd9Sstevel@tonic-gate{} 4157c478bd9Sstevel@tonic-gate 4167c478bd9Sstevel@tonic-gatevoid 4177c478bd9Sstevel@tonic-gate_allsyscalls() 4187c478bd9Sstevel@tonic-gate{} 4197c478bd9Sstevel@tonic-gate 4207c478bd9Sstevel@tonic-gatesize_t _allsyscalls_size; 4217c478bd9Sstevel@tonic-gate 4227c478bd9Sstevel@tonic-gate#else /* __lint */ 4237c478bd9Sstevel@tonic-gate 4249acbbeafSnn35248 ENTRY_NP2(brand_sys_syscall,_allsyscalls) 425843e1988Sjohnlev SWAPGS /* kernel gsbase */ 42605979f64Sjohnlev XPV_SYSCALL_PROD 427b72c368aS BRAND_CALLBACK(BRAND_CB_SYSCALL, 
BRAND_URET_FROM_REG(%rcx)) 42805979f64Sjohnlev jmp noprod_sys_syscall 4297c478bd9Sstevel@tonic-gate 4309acbbeafSnn35248 ALTENTRY(sys_syscall) 431843e1988Sjohnlev SWAPGS /* kernel gsbase */ 43205979f64Sjohnlev XPV_SYSCALL_PROD 433843e1988Sjohnlev 43405979f64Sjohnlevnoprod_sys_syscall: 4357c478bd9Sstevel@tonic-gate movq %r15, %gs:CPU_RTMP_R15 436843e1988Sjohnlev movq %rsp, %gs:CPU_RTMP_RSP 437843e1988Sjohnlev 4387c478bd9Sstevel@tonic-gate movq %gs:CPU_THREAD, %r15 43981fd181aSTodd Clayton movq T_STACK(%r15), %rsp /* switch from user to kernel stack */ 44081fd181aSTodd Clayton 44181fd181aSTodd Clayton ASSERT_UPCALL_MASK_IS_SET 4427c478bd9Sstevel@tonic-gate 4437c478bd9Sstevel@tonic-gate movl $UCS_SEL, REGOFF_CS(%rsp) 4447c478bd9Sstevel@tonic-gate movq %rcx, REGOFF_RIP(%rsp) /* syscall: %rip -> %rcx */ 4457c478bd9Sstevel@tonic-gate movq %r11, REGOFF_RFL(%rsp) /* syscall: %rfl -> %r11d */ 4467c478bd9Sstevel@tonic-gate movl $UDS_SEL, REGOFF_SS(%rsp) 4477c478bd9Sstevel@tonic-gate 4487c478bd9Sstevel@tonic-gate movl %eax, %eax /* wrapper: sysc# -> %eax */ 4497c478bd9Sstevel@tonic-gate movq %rdi, REGOFF_RDI(%rsp) 4507c478bd9Sstevel@tonic-gate movq %rsi, REGOFF_RSI(%rsp) 4517c478bd9Sstevel@tonic-gate movq %rdx, REGOFF_RDX(%rsp) 4527c478bd9Sstevel@tonic-gate movq %r10, REGOFF_RCX(%rsp) /* wrapper: %rcx -> %r10 */ 4537c478bd9Sstevel@tonic-gate movq %r10, %rcx /* arg[3] for direct calls */ 4547c478bd9Sstevel@tonic-gate 4557c478bd9Sstevel@tonic-gate movq %r8, REGOFF_R8(%rsp) 4567c478bd9Sstevel@tonic-gate movq %r9, REGOFF_R9(%rsp) 4577c478bd9Sstevel@tonic-gate movq %rax, REGOFF_RAX(%rsp) 4587c478bd9Sstevel@tonic-gate movq %rbx, REGOFF_RBX(%rsp) 4597c478bd9Sstevel@tonic-gate 4607c478bd9Sstevel@tonic-gate movq %rbp, REGOFF_RBP(%rsp) 4617c478bd9Sstevel@tonic-gate movq %r10, REGOFF_R10(%rsp) 4627c478bd9Sstevel@tonic-gate movq %gs:CPU_RTMP_RSP, %r11 4637c478bd9Sstevel@tonic-gate movq %r11, REGOFF_RSP(%rsp) 4647c478bd9Sstevel@tonic-gate movq %r12, REGOFF_R12(%rsp) 
4657c478bd9Sstevel@tonic-gate 4667c478bd9Sstevel@tonic-gate movq %r13, REGOFF_R13(%rsp) 4677c478bd9Sstevel@tonic-gate movq %r14, REGOFF_R14(%rsp) 4687c478bd9Sstevel@tonic-gate movq %gs:CPU_RTMP_R15, %r10 4697c478bd9Sstevel@tonic-gate movq %r10, REGOFF_R15(%rsp) 4707c478bd9Sstevel@tonic-gate movq $0, REGOFF_SAVFP(%rsp) 4717c478bd9Sstevel@tonic-gate movq $0, REGOFF_SAVPC(%rsp) 4727c478bd9Sstevel@tonic-gate 4737c478bd9Sstevel@tonic-gate /* 4747c478bd9Sstevel@tonic-gate * Copy these registers here in case we end up stopped with 4757c478bd9Sstevel@tonic-gate * someone (like, say, /proc) messing with our register state. 4767c478bd9Sstevel@tonic-gate * We don't -restore- them unless we have to in update_sregs. 4777c478bd9Sstevel@tonic-gate * 4787c478bd9Sstevel@tonic-gate * Since userland -can't- change fsbase or gsbase directly, 4797c478bd9Sstevel@tonic-gate * and capturing them involves two serializing instructions, 4807c478bd9Sstevel@tonic-gate * we don't bother to capture them here. 4817c478bd9Sstevel@tonic-gate */ 4827c478bd9Sstevel@tonic-gate xorl %ebx, %ebx 4837c478bd9Sstevel@tonic-gate movw %ds, %bx 4847c478bd9Sstevel@tonic-gate movq %rbx, REGOFF_DS(%rsp) 4857c478bd9Sstevel@tonic-gate movw %es, %bx 4867c478bd9Sstevel@tonic-gate movq %rbx, REGOFF_ES(%rsp) 4877c478bd9Sstevel@tonic-gate movw %fs, %bx 4887c478bd9Sstevel@tonic-gate movq %rbx, REGOFF_FS(%rsp) 4897c478bd9Sstevel@tonic-gate movw %gs, %bx 4907c478bd9Sstevel@tonic-gate movq %rbx, REGOFF_GS(%rsp) 4917c478bd9Sstevel@tonic-gate 4927c478bd9Sstevel@tonic-gate /* 4937c478bd9Sstevel@tonic-gate * Machine state saved in the regs structure on the stack 4947c478bd9Sstevel@tonic-gate * First six args in %rdi, %rsi, %rdx, %rcx, %r8, %r9 4957c478bd9Sstevel@tonic-gate * %eax is the syscall number 4967c478bd9Sstevel@tonic-gate * %rsp is the thread's stack, %r15 is curthread 4977c478bd9Sstevel@tonic-gate * REG_RSP(%rsp) is the user's stack 4987c478bd9Sstevel@tonic-gate */ 4997c478bd9Sstevel@tonic-gate 
5007c478bd9Sstevel@tonic-gate SYSCALL_TRAPTRACE($TT_SYSC64) 5017c478bd9Sstevel@tonic-gate 5027c478bd9Sstevel@tonic-gate movq %rsp, %rbp 5037c478bd9Sstevel@tonic-gate 5047c478bd9Sstevel@tonic-gate movq T_LWP(%r15), %r14 5057c478bd9Sstevel@tonic-gate ASSERT_NO_RUPDATE_PENDING(%r14) 5067c478bd9Sstevel@tonic-gate ENABLE_INTR_FLAGS 5077c478bd9Sstevel@tonic-gate 5087c478bd9Sstevel@tonic-gate MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM) 5097c478bd9Sstevel@tonic-gate movl REGOFF_RAX(%rsp), %eax /* (%rax damaged by mstate call) */ 5107c478bd9Sstevel@tonic-gate 5117c478bd9Sstevel@tonic-gate ASSERT_LWPTOREGS(%r14, %rsp) 5127c478bd9Sstevel@tonic-gate 5137c478bd9Sstevel@tonic-gate movb $LWP_SYS, LWP_STATE(%r14) 5147c478bd9Sstevel@tonic-gate incq LWP_RU_SYSC(%r14) 5157c478bd9Sstevel@tonic-gate movb $NORMALRETURN, LWP_EOSYS(%r14) 5167c478bd9Sstevel@tonic-gate 5177c478bd9Sstevel@tonic-gate incq %gs:CPU_STATS_SYS_SYSCALL 5187c478bd9Sstevel@tonic-gate 5197c478bd9Sstevel@tonic-gate movw %ax, T_SYSNUM(%r15) 5207c478bd9Sstevel@tonic-gate movzbl T_PRE_SYS(%r15), %ebx 5217c478bd9Sstevel@tonic-gate ORL_SYSCALLTRACE(%ebx) 5227c478bd9Sstevel@tonic-gate testl %ebx, %ebx 5237c478bd9Sstevel@tonic-gate jne _syscall_pre 5247c478bd9Sstevel@tonic-gate 5257c478bd9Sstevel@tonic-gate_syscall_invoke: 5267c478bd9Sstevel@tonic-gate movq REGOFF_RDI(%rbp), %rdi 5277c478bd9Sstevel@tonic-gate movq REGOFF_RSI(%rbp), %rsi 5287c478bd9Sstevel@tonic-gate movq REGOFF_RDX(%rbp), %rdx 5297c478bd9Sstevel@tonic-gate movq REGOFF_RCX(%rbp), %rcx 5307c478bd9Sstevel@tonic-gate movq REGOFF_R8(%rbp), %r8 5317c478bd9Sstevel@tonic-gate movq REGOFF_R9(%rbp), %r9 5327c478bd9Sstevel@tonic-gate 5337c478bd9Sstevel@tonic-gate cmpl $NSYSCALL, %eax 5347c478bd9Sstevel@tonic-gate jae _syscall_ill 5357c478bd9Sstevel@tonic-gate shll $SYSENT_SIZE_SHIFT, %eax 5367c478bd9Sstevel@tonic-gate leaq sysent(%rax), %rbx 5377c478bd9Sstevel@tonic-gate 5387c478bd9Sstevel@tonic-gate call *SY_CALLC(%rbx) 5397c478bd9Sstevel@tonic-gate 
5407c478bd9Sstevel@tonic-gate movq %rax, %r12 5417c478bd9Sstevel@tonic-gate movq %rdx, %r13 5427c478bd9Sstevel@tonic-gate 5437c478bd9Sstevel@tonic-gate /* 5447c478bd9Sstevel@tonic-gate * If the handler returns two ints, then we need to split the 5457c478bd9Sstevel@tonic-gate * 64-bit return value into two 32-bit values. 5467c478bd9Sstevel@tonic-gate */ 5477c478bd9Sstevel@tonic-gate testw $SE_32RVAL2, SY_FLAGS(%rbx) 5487c478bd9Sstevel@tonic-gate je 5f 5497c478bd9Sstevel@tonic-gate movq %r12, %r13 5507c478bd9Sstevel@tonic-gate shrq $32, %r13 /* upper 32-bits into %edx */ 5517c478bd9Sstevel@tonic-gate movl %r12d, %r12d /* lower 32-bits into %eax */ 5527c478bd9Sstevel@tonic-gate5: 5537c478bd9Sstevel@tonic-gate /* 5547c478bd9Sstevel@tonic-gate * Optimistically assume that there's no post-syscall 5557c478bd9Sstevel@tonic-gate * work to do. (This is to avoid having to call syscall_mstate() 5567c478bd9Sstevel@tonic-gate * with interrupts disabled) 5577c478bd9Sstevel@tonic-gate */ 5587c478bd9Sstevel@tonic-gate MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER) 5597c478bd9Sstevel@tonic-gate 5607c478bd9Sstevel@tonic-gate /* 5617c478bd9Sstevel@tonic-gate * We must protect ourselves from being descheduled here; 5627c478bd9Sstevel@tonic-gate * If we were, and we ended up on another cpu, or another 5637c478bd9Sstevel@tonic-gate * lwp got in ahead of us, it could change the segment 5647c478bd9Sstevel@tonic-gate * registers without us noticing before we return to userland. 5657c478bd9Sstevel@tonic-gate */ 566ae115bc7Smrj CLI(%r14) 5677c478bd9Sstevel@tonic-gate CHECK_POSTSYS_NE(%r15, %r14, %ebx) 5687c478bd9Sstevel@tonic-gate jne _syscall_post 5696ba2dbf5SRobert Mustacchi 5706ba2dbf5SRobert Mustacchi /* 5716ba2dbf5SRobert Mustacchi * We need to protect ourselves against non-canonical return values 5726ba2dbf5SRobert Mustacchi * because Intel doesn't check for them on sysret (AMD does). 
Canonical 5736ba2dbf5SRobert Mustacchi * addresses on current amd64 processors only use 48-bits for VAs; an 5746ba2dbf5SRobert Mustacchi * address is canonical if all upper bits (47-63) are identical. If we 5756ba2dbf5SRobert Mustacchi * find a non-canonical %rip, we opt to go through the full 5766ba2dbf5SRobert Mustacchi * _syscall_post path which takes us into an iretq which is not 5776ba2dbf5SRobert Mustacchi * susceptible to the same problems sysret is. 5786ba2dbf5SRobert Mustacchi * 5796ba2dbf5SRobert Mustacchi * We're checking for a canonical address by first doing an arithmetic 5806ba2dbf5SRobert Mustacchi * shift. This will fill in the remaining bits with the value of bit 63. 5816ba2dbf5SRobert Mustacchi * If the address were canonical, the register would now have either all 5826ba2dbf5SRobert Mustacchi * zeroes or all ones in it. Therefore we add one (inducing overflow) 5836ba2dbf5SRobert Mustacchi * and compare against 1. A canonical address will either be zero or one 5846ba2dbf5SRobert Mustacchi * at this point, hence the use of ja. 5856ba2dbf5SRobert Mustacchi * 5866ba2dbf5SRobert Mustacchi * At this point, r12 and r13 have the return value so we can't use 5876ba2dbf5SRobert Mustacchi * those registers. 5886ba2dbf5SRobert Mustacchi */ 5896ba2dbf5SRobert Mustacchi movq REGOFF_RIP(%rsp), %rcx 5906ba2dbf5SRobert Mustacchi sarq $47, %rcx 5916ba2dbf5SRobert Mustacchi incq %rcx 5926ba2dbf5SRobert Mustacchi cmpq $1, %rcx 5936ba2dbf5SRobert Mustacchi ja _syscall_post 5946ba2dbf5SRobert Mustacchi 5956ba2dbf5SRobert Mustacchi 5967c478bd9Sstevel@tonic-gate SIMPLE_SYSCALL_POSTSYS(%r15, %r14, %bx) 5977c478bd9Sstevel@tonic-gate 5987c478bd9Sstevel@tonic-gate movq %r12, REGOFF_RAX(%rsp) 5997c478bd9Sstevel@tonic-gate movq %r13, REGOFF_RDX(%rsp) 6007c478bd9Sstevel@tonic-gate 6017c478bd9Sstevel@tonic-gate /* 6027c478bd9Sstevel@tonic-gate * To get back to userland, we need the return %rip in %rcx and 6037c478bd9Sstevel@tonic-gate * the return %rfl in %r11d. 
The sysretq instruction also arranges 6047c478bd9Sstevel@tonic-gate * to fix up %cs and %ss; everything else is our responsibility. 6057c478bd9Sstevel@tonic-gate */ 6067c478bd9Sstevel@tonic-gate movq REGOFF_RDI(%rsp), %rdi 6077c478bd9Sstevel@tonic-gate movq REGOFF_RSI(%rsp), %rsi 6087c478bd9Sstevel@tonic-gate movq REGOFF_RDX(%rsp), %rdx 6097c478bd9Sstevel@tonic-gate /* %rcx used to restore %rip value */ 6107c478bd9Sstevel@tonic-gate 6117c478bd9Sstevel@tonic-gate movq REGOFF_R8(%rsp), %r8 6127c478bd9Sstevel@tonic-gate movq REGOFF_R9(%rsp), %r9 6137c478bd9Sstevel@tonic-gate movq REGOFF_RAX(%rsp), %rax 6147c478bd9Sstevel@tonic-gate movq REGOFF_RBX(%rsp), %rbx 6157c478bd9Sstevel@tonic-gate 6167c478bd9Sstevel@tonic-gate movq REGOFF_RBP(%rsp), %rbp 6177c478bd9Sstevel@tonic-gate movq REGOFF_R10(%rsp), %r10 6187c478bd9Sstevel@tonic-gate /* %r11 used to restore %rfl value */ 6197c478bd9Sstevel@tonic-gate movq REGOFF_R12(%rsp), %r12 6207c478bd9Sstevel@tonic-gate 6217c478bd9Sstevel@tonic-gate movq REGOFF_R13(%rsp), %r13 6227c478bd9Sstevel@tonic-gate movq REGOFF_R14(%rsp), %r14 6237c478bd9Sstevel@tonic-gate movq REGOFF_R15(%rsp), %r15 6247c478bd9Sstevel@tonic-gate 6257c478bd9Sstevel@tonic-gate movq REGOFF_RIP(%rsp), %rcx 6267c478bd9Sstevel@tonic-gate movl REGOFF_RFL(%rsp), %r11d 627843e1988Sjohnlev 628843e1988Sjohnlev#if defined(__xpv) 629843e1988Sjohnlev addq $REGOFF_RIP, %rsp 630843e1988Sjohnlev#else 6317c478bd9Sstevel@tonic-gate movq REGOFF_RSP(%rsp), %rsp 632843e1988Sjohnlev#endif 633843e1988Sjohnlev 634843e1988Sjohnlev /* 635843e1988Sjohnlev * There can be no instructions between the ALTENTRY below and 636843e1988Sjohnlev * SYSRET or we could end up breaking brand support. See label usage 637843e1988Sjohnlev * in sn1_brand_syscall_callback for an example. 
638843e1988Sjohnlev */ 639843e1988Sjohnlev ASSERT_UPCALL_MASK_IS_SET 64081fd181aSTodd Clayton#if defined(__xpv) 64181fd181aSTodd Clayton SYSRETQ 64281fd181aSTodd Clayton ALTENTRY(nopop_sys_syscall_swapgs_sysretq) 64381fd181aSTodd Clayton 64481fd181aSTodd Clayton /* 64581fd181aSTodd Clayton * We can only get here after executing a brand syscall 64681fd181aSTodd Clayton * interposition callback handler and simply need to 64781fd181aSTodd Clayton * "sysretq" back to userland. On the hypervisor this 64881fd181aSTodd Clayton * involves the iret hypercall which requires us to construct 64981fd181aSTodd Clayton * just enough of the stack needed for the hypercall. 65081fd181aSTodd Clayton * (rip, cs, rflags, rsp, ss). 65181fd181aSTodd Clayton */ 65281fd181aSTodd Clayton movq %rsp, %gs:CPU_RTMP_RSP /* save user's rsp */ 65381fd181aSTodd Clayton movq %gs:CPU_THREAD, %r11 65481fd181aSTodd Clayton movq T_STACK(%r11), %rsp 65581fd181aSTodd Clayton 65681fd181aSTodd Clayton movq %rcx, REGOFF_RIP(%rsp) 65781fd181aSTodd Clayton movl $UCS_SEL, REGOFF_CS(%rsp) 65881fd181aSTodd Clayton movq %gs:CPU_RTMP_RSP, %r11 65981fd181aSTodd Clayton movq %r11, REGOFF_RSP(%rsp) 66081fd181aSTodd Clayton pushfq 66181fd181aSTodd Clayton popq %r11 /* hypercall enables ints */ 66281fd181aSTodd Clayton movq %r11, REGOFF_RFL(%rsp) 66381fd181aSTodd Clayton movl $UDS_SEL, REGOFF_SS(%rsp) 66481fd181aSTodd Clayton addq $REGOFF_RIP, %rsp 66581fd181aSTodd Clayton /* 66681fd181aSTodd Clayton * XXPV: see comment in SYSRETQ definition for future optimization 66781fd181aSTodd Clayton * we could take. 
66881fd181aSTodd Clayton */ 66981fd181aSTodd Clayton ASSERT_UPCALL_MASK_IS_SET 67081fd181aSTodd Clayton SYSRETQ 67181fd181aSTodd Clayton#else 67265488c97S ALTENTRY(nopop_sys_syscall_swapgs_sysretq) 673843e1988Sjohnlev SWAPGS /* user gsbase */ 674843e1988Sjohnlev SYSRETQ 67581fd181aSTodd Clayton#endif 676843e1988Sjohnlev /*NOTREACHED*/ 67765488c97S SET_SIZE(nopop_sys_syscall_swapgs_sysretq) 6787c478bd9Sstevel@tonic-gate 6797c478bd9Sstevel@tonic-gate_syscall_pre: 6807c478bd9Sstevel@tonic-gate call pre_syscall 6817c478bd9Sstevel@tonic-gate movl %eax, %r12d 6827c478bd9Sstevel@tonic-gate testl %eax, %eax 6837c478bd9Sstevel@tonic-gate jne _syscall_post_call 6847c478bd9Sstevel@tonic-gate /* 6857c478bd9Sstevel@tonic-gate * Didn't abort, so reload the syscall args and invoke the handler. 6867c478bd9Sstevel@tonic-gate */ 6877c478bd9Sstevel@tonic-gate movzwl T_SYSNUM(%r15), %eax 6887c478bd9Sstevel@tonic-gate jmp _syscall_invoke 6897c478bd9Sstevel@tonic-gate 6907c478bd9Sstevel@tonic-gate_syscall_ill: 6917c478bd9Sstevel@tonic-gate call nosys 6927c478bd9Sstevel@tonic-gate movq %rax, %r12 6937c478bd9Sstevel@tonic-gate movq %rdx, %r13 6947c478bd9Sstevel@tonic-gate jmp _syscall_post_call 6957c478bd9Sstevel@tonic-gate 6967c478bd9Sstevel@tonic-gate_syscall_post: 697ae115bc7Smrj STI 6987c478bd9Sstevel@tonic-gate /* 6997c478bd9Sstevel@tonic-gate * Sigh, our optimism wasn't justified, put it back to LMS_SYSTEM 7007c478bd9Sstevel@tonic-gate * so that we can account for the extra work it takes us to finish. 
	 */
	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
_syscall_post_call:
	movq	%r12, %rdi		/* rval1 */
	movq	%r13, %rsi		/* rval2 */
	call	post_syscall
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
	jmp	_sys_rtt
	SET_SIZE(sys_syscall)
	SET_SIZE(brand_sys_syscall)

#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
sys_syscall32()
{}

#else	/* __lint */

	/*
	 * System call entry for 32-bit processes using the syscall
	 * instruction on the 64-bit kernel.  The branded variant runs
	 * the brand's interposition callback first, then joins the
	 * common path at nopop_sys_syscall32.
	 */
	ENTRY_NP(brand_sys_syscall32)
	SWAPGS				/* kernel gsbase */
	XPV_TRAP_POP
	BRAND_CALLBACK(BRAND_CB_SYSCALL32, BRAND_URET_FROM_REG(%rcx))
	jmp	nopop_sys_syscall32

	ALTENTRY(sys_syscall32)
	SWAPGS				/* kernel gsbase */
	XPV_TRAP_POP

nopop_sys_syscall32:
	movl	%esp, %r10d		/* stash low 32 bits of user %esp */
	movq	%gs:CPU_THREAD, %r15
	movq	T_STACK(%r15), %rsp	/* switch onto thread's kernel stack */
	movl	%eax, %eax		/* zero-extend the syscall number */

	movl	$U32CS_SEL, REGOFF_CS(%rsp)
	movl	%ecx, REGOFF_RIP(%rsp)		/* syscall: %rip -> %rcx */
	movq	%r11, REGOFF_RFL(%rsp)		/* syscall: %rfl -> %r11d */
	movq	%r10, REGOFF_RSP(%rsp)		/* user %esp saved above */
	movl	$UDS_SEL, REGOFF_SS(%rsp)

_syscall32_save:
	/*
	 * Save the caller's 32-bit register state into the regs frame.
	 * (sys_syscall_int joins the common flow at this label.)
	 */
	movl	%edi, REGOFF_RDI(%rsp)
	movl	%esi, REGOFF_RSI(%rsp)
	movl	%ebp, REGOFF_RBP(%rsp)
	movl	%ebx, REGOFF_RBX(%rsp)
	movl	%edx, REGOFF_RDX(%rsp)
	movl	%ecx, REGOFF_RCX(%rsp)
	movl	%eax, REGOFF_RAX(%rsp)		/* wrapper: sysc# -> %eax */
	movq	$0, REGOFF_SAVFP(%rsp)
	movq	$0, REGOFF_SAVPC(%rsp)

	/*
	 * Copy these registers here in case we end up stopped with
	 * someone (like, say, /proc) messing with our register state.
	 * We don't -restore- them unless we have to in update_sregs.
	 *
	 * Since userland -can't- change fsbase or gsbase directly,
	 * we don't bother to capture them here.
	 */
	xorl	%ebx, %ebx
	movw	%ds, %bx
	movq	%rbx, REGOFF_DS(%rsp)
	movw	%es, %bx
	movq	%rbx, REGOFF_ES(%rsp)
	movw	%fs, %bx
	movq	%rbx, REGOFF_FS(%rsp)
	movw	%gs, %bx
	movq	%rbx, REGOFF_GS(%rsp)

	/*
	 * Application state saved in the regs structure on the stack
	 * %eax is the syscall number
	 * %rsp is the thread's stack, %r15 is curthread
	 * REG_RSP(%rsp) is the user's stack
	 */

	SYSCALL_TRAPTRACE32($TT_SYSC)

	movq	%rsp, %rbp

	movq	T_LWP(%r15), %r14
	ASSERT_NO_RUPDATE_PENDING(%r14)

	ENABLE_INTR_FLAGS

	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
	movl	REGOFF_RAX(%rsp), %eax	/* (%rax damaged by mstate call) */

	ASSERT_LWPTOREGS(%r14, %rsp)

	incq	%gs:CPU_STATS_SYS_SYSCALL

	/*
	 * Make some space for MAXSYSARGS (currently 8) 32-bit args placed
	 * into 64-bit (long) arg slots, maintaining 16 byte alignment.  Or
	 * more succinctly:
	 *
	 *	SA(MAXSYSARGS * sizeof (long)) == 64
	 */
#define	SYS_DROP	64			/* drop for args */
	subq	$SYS_DROP, %rsp
	movb	$LWP_SYS, LWP_STATE(%r14)
	movq	%r15, %rdi
	movq	%rsp, %rsi
	call	syscall_entry

	/*
	 * Fetch the arguments copied onto the kernel stack and put
	 * them in the right registers to invoke a C-style syscall handler.
	 * %rax contains the handler address.
	 *
	 * Ideas for making all this go faster of course include simply
	 * forcibly fetching 6 arguments from the user stack under lofault
	 * protection, reverting to copyin_args only when watchpoints
	 * are in effect.
	 *
	 * (If we do this, make sure that exec and libthread leave
	 * enough space at the top of the stack to ensure that we'll
	 * never do a fetch from an invalid page.)
	 *
	 * Lots of ideas here, but they won't really help with bringup B-)
	 * Correctness can't wait, performance can wait a little longer ..
	 */

	movq	%rax, %rbx
	movl	0(%rsp), %edi
	movl	8(%rsp), %esi
	movl	0x10(%rsp), %edx
	movl	0x18(%rsp), %ecx
	movl	0x20(%rsp), %r8d
	movl	0x28(%rsp), %r9d

	call	*SY_CALLC(%rbx)

	movq	%rbp, %rsp	/* pop the args */

	/*
	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
	 * On the 32-bit kernel, they always return that value in %eax:%edx
	 * as required by the 32-bit ABI.
	 *
	 * Simulate the same behaviour by unconditionally splitting the
	 * return value in the same way.
	 */
	movq	%rax, %r13
	shrq	$32, %r13	/* upper 32-bits into %edx */
	movl	%eax, %r12d	/* lower 32-bits into %eax */

	/*
	 * Optimistically assume that there's no post-syscall
	 * work to do.  (This is to avoid having to call syscall_mstate()
	 * with interrupts disabled)
	 */
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	/*
	 * We must protect ourselves from being descheduled here;
	 * If we were, and we ended up on another cpu, or another
	 * lwp got in ahead of us, it could change the segment
	 * registers without us noticing before we return to userland.
	 */
	CLI(%r14)
	CHECK_POSTSYS_NE(%r15, %r14, %ebx)
	jne	_full_syscall_postsys32
	SIMPLE_SYSCALL_POSTSYS(%r15, %r14, %bx)

	/*
	 * To get back to userland, we need to put the return %rip in %rcx and
	 * the return %rfl in %r11d.  The sysret instruction also arranges
	 * to fix up %cs and %ss; everything else is our responsibility.
	 */

	movl	%r12d, %eax			/* %eax: rval1 */
	movl	REGOFF_RBX(%rsp), %ebx
	/* %ecx used for return pointer */
	movl	%r13d, %edx			/* %edx: rval2 */
	movl	REGOFF_RBP(%rsp), %ebp
	movl	REGOFF_RSI(%rsp), %esi
	movl	REGOFF_RDI(%rsp), %edi

	movl	REGOFF_RFL(%rsp), %r11d		/* %r11 -> eflags */
	movl	REGOFF_RIP(%rsp), %ecx		/* %ecx -> %eip */
	movl	REGOFF_RSP(%rsp), %esp		/* back onto the user stack */

	ASSERT_UPCALL_MASK_IS_SET
	ALTENTRY(nopop_sys_syscall32_swapgs_sysretl)
	SWAPGS				/* user gsbase */
	SYSRETL
	SET_SIZE(nopop_sys_syscall32_swapgs_sysretl)
	/*NOTREACHED*/

_full_syscall_postsys32:
	STI
	/*
	 * Sigh, our optimism wasn't justified, put it back to LMS_SYSTEM
	 * so that we can account for the extra work it takes us to finish.
	 */
	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
	movq	%r15, %rdi
	movq	%r12, %rsi			/* rval1 - %eax */
	movq	%r13, %rdx			/* rval2 - %edx */
	call	syscall_exit
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
	jmp	_sys_rtt
	SET_SIZE(sys_syscall32)
	SET_SIZE(brand_sys_syscall32)

#endif	/* __lint */

/*
 * System call handler via the sysenter instruction
 * Used only for 32-bit system calls on the 64-bit kernel.
 *
 * The caller in userland has arranged that:
 *
 * -	%eax contains the syscall number
 * -	%ecx contains the user %esp
 * -	%edx contains the return %eip
 * -	the user stack contains the args to the syscall
 *
 * Hardware and (privileged) initialization code have arranged that by
 * the time the sysenter instructions completes:
 *
 * - %rip is pointing to sys_sysenter (below).
 * - %cs and %ss are set to kernel text and stack (data) selectors.
 * - %rsp is pointing at the lwp's stack
 * - interrupts have been disabled.
 *
 * Note that we are unable to return both "rvals" to userland with
 * this call, as %edx is used by the sysexit instruction.
 *
 * One final complication in this routine is its interaction with
 * single-stepping in a debugger.  For most of the system call mechanisms,
 * the CPU automatically clears the single-step flag before we enter the
 * kernel.  The sysenter mechanism does not clear the flag, so a user
 * single-stepping through a libc routine may suddenly find him/herself
 * single-stepping through the kernel.  To detect this, kmdb compares the
 * trap %pc to the [brand_]sys_enter addresses on each single-step trap.
 * If it finds that we have single-stepped to a sysenter entry point, it
 * explicitly clears the flag and executes the sys_sysenter routine.
 *
 * One final complication in this final complication is the fact that we
 * have two different entry points for sysenter: brand_sys_sysenter and
 * sys_sysenter.  If we enter at brand_sys_sysenter and start single-stepping
 * through the kernel with kmdb, we will eventually hit the instruction at
 * sys_sysenter.  kmdb cannot distinguish between that valid single-step
 * and the undesirable one mentioned above.  To avoid this situation, we
 * simply add a jump over the instruction at sys_sysenter to make it
 * impossible to single-step to it.
 */
#if defined(__lint)

void
sys_sysenter()
{}

#else	/* __lint */

	ENTRY_NP(brand_sys_sysenter)
	SWAPGS				/* kernel gsbase */
	ALTENTRY(_brand_sys_sysenter_post_swapgs)
	BRAND_CALLBACK(BRAND_CB_SYSENTER, BRAND_URET_FROM_REG(%rdx))
	/*
	 * Jump over sys_sysenter to allow single-stepping as described
	 * above.
	 */
	jmp	_sys_sysenter_post_swapgs

	ALTENTRY(sys_sysenter)
	SWAPGS				/* kernel gsbase */

	ALTENTRY(_sys_sysenter_post_swapgs)
	movq	%gs:CPU_THREAD, %r15

	movl	$U32CS_SEL, REGOFF_CS(%rsp)
	movl	%ecx, REGOFF_RSP(%rsp)		/* wrapper: %esp -> %ecx */
	movl	%edx, REGOFF_RIP(%rsp)		/* wrapper: %eip -> %edx */
	pushfq
	popq	%r10			/* current rflags, for regs frame */
	movl	$UDS_SEL, REGOFF_SS(%rsp)

	/*
	 * Set the interrupt flag before storing the flags to the
	 * flags image on the stack so we can return to user with
	 * interrupts enabled if we return via sys_rtt_syscall32
	 */
	orq	$PS_IE, %r10
	movq	%r10, REGOFF_RFL(%rsp)

	movl	%edi, REGOFF_RDI(%rsp)
	movl	%esi, REGOFF_RSI(%rsp)
	movl	%ebp, REGOFF_RBP(%rsp)
	movl	%ebx, REGOFF_RBX(%rsp)
	movl	%edx, REGOFF_RDX(%rsp)
	movl	%ecx, REGOFF_RCX(%rsp)
	movl	%eax, REGOFF_RAX(%rsp)		/* wrapper: sysc# -> %eax */
	movq	$0, REGOFF_SAVFP(%rsp)
	movq	$0, REGOFF_SAVPC(%rsp)

	/*
	 * Copy these registers here in case we end up stopped with
	 * someone (like, say, /proc) messing with our register state.
	 * We don't -restore- them unless we have to in update_sregs.
	 *
	 * Since userland -can't- change fsbase or gsbase directly,
	 * we don't bother to capture them here.
	 */
	xorl	%ebx, %ebx
	movw	%ds, %bx
	movq	%rbx, REGOFF_DS(%rsp)
	movw	%es, %bx
	movq	%rbx, REGOFF_ES(%rsp)
	movw	%fs, %bx
	movq	%rbx, REGOFF_FS(%rsp)
	movw	%gs, %bx
	movq	%rbx, REGOFF_GS(%rsp)

	/*
	 * Application state saved in the regs structure on the stack
	 * %eax is the syscall number
	 * %rsp is the thread's stack, %r15 is curthread
	 * REG_RSP(%rsp) is the user's stack
	 */

	SYSCALL_TRAPTRACE($TT_SYSENTER)

	movq	%rsp, %rbp

	movq	T_LWP(%r15), %r14
	ASSERT_NO_RUPDATE_PENDING(%r14)

	ENABLE_INTR_FLAGS

	/*
	 * Catch 64-bit process trying to issue sysenter instruction
	 * on Nocona based systems.
	 */
	movq	LWP_PROCP(%r14), %rax
	cmpq	$DATAMODEL_ILP32, P_MODEL(%rax)
	je	7f

	/*
	 * For a non-32-bit process, simulate a #ud, since that's what
	 * native hardware does.  The traptrace entry (above) will
	 * let you know what really happened.
	 */
	movq	$T_ILLINST, REGOFF_TRAPNO(%rsp)
	movq	REGOFF_CS(%rsp), %rdi
	movq	%rdi, REGOFF_ERR(%rsp)
	movq	%rsp, %rdi
	movq	REGOFF_RIP(%rsp), %rsi
	movl	%gs:CPU_ID, %edx
	call	trap
	jmp	_sys_rtt
7:

	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
	movl	REGOFF_RAX(%rsp), %eax	/* (%rax damaged by mstate calls) */

	ASSERT_LWPTOREGS(%r14, %rsp)

	incq	%gs:CPU_STATS_SYS_SYSCALL

	/*
	 * Make some space for MAXSYSARGS (currently 8) 32-bit args
	 * placed into 64-bit (long) arg slots, plus one 64-bit
	 * (long) arg count, maintaining 16 byte alignment.
	 */
	subq	$SYS_DROP, %rsp
	movb	$LWP_SYS, LWP_STATE(%r14)
	movq	%r15, %rdi
	movq	%rsp, %rsi
	call	syscall_entry

	/*
	 * Fetch the arguments copied onto the kernel stack and put
	 * them in the right registers to invoke a C-style syscall handler.
	 * %rax contains the handler address.
	 */
	movq	%rax, %rbx
	movl	0(%rsp), %edi
	movl	8(%rsp), %esi
	movl	0x10(%rsp), %edx
	movl	0x18(%rsp), %ecx
	movl	0x20(%rsp), %r8d
	movl	0x28(%rsp), %r9d

	call	*SY_CALLC(%rbx)

	movq	%rbp, %rsp	/* pop the args */

	/*
	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
	 * On the 32-bit kernel, they always return that value in %eax:%edx
	 * as required by the 32-bit ABI.
	 *
	 * Simulate the same behaviour by unconditionally splitting the
	 * return value in the same way.
	 */
	movq	%rax, %r13
	shrq	$32, %r13	/* upper 32-bits into %edx */
	movl	%eax, %r12d	/* lower 32-bits into %eax */

	/*
	 * Optimistically assume that there's no post-syscall
	 * work to do.  (This is to avoid having to call syscall_mstate()
	 * with interrupts disabled)
	 */
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	/*
	 * We must protect ourselves from being descheduled here;
	 * If we were, and we ended up on another cpu, or another
	 * lwp got in ahead of us, it could change the segment
	 * registers without us noticing before we return to userland.
	 */
	cli
	CHECK_POSTSYS_NE(%r15, %r14, %ebx)
	jne	_full_syscall_postsys32
	SIMPLE_SYSCALL_POSTSYS(%r15, %r14, %bx)

	/*
	 * To get back to userland, load up the 32-bit registers and
	 * sysexit back where we came from.
	 */

	/*
	 * Interrupts will be turned on by the 'sti' executed just before
	 * sysexit.  The following ensures that restoring the user's rflags
	 * doesn't enable interrupts too soon.
	 */
	andq	$_BITNOT(PS_IE), REGOFF_RFL(%rsp)

	/*
	 * (There's no point in loading up %edx because the sysexit
	 * mechanism smashes it.)
	 */
	movl	%r12d, %eax
	movl	REGOFF_RBX(%rsp), %ebx
	movl	REGOFF_RBP(%rsp), %ebp
	movl	REGOFF_RSI(%rsp), %esi
	movl	REGOFF_RDI(%rsp), %edi

	movl	REGOFF_RIP(%rsp), %edx	/* sysexit: %edx -> %eip */
	pushq	REGOFF_RFL(%rsp)
	popfq
	movl	REGOFF_RSP(%rsp), %ecx	/* sysexit: %ecx -> %esp */
	ALTENTRY(sys_sysenter_swapgs_sysexit)
	swapgs
	sti
	sysexitl
	SET_SIZE(sys_sysenter_swapgs_sysexit)
	SET_SIZE(sys_sysenter)
	SET_SIZE(_sys_sysenter_post_swapgs)
	SET_SIZE(brand_sys_sysenter)

#endif	/* __lint */

/*
 * This is the destination of the "int $T_SYSCALLINT" interrupt gate, used by
 * the generic i386 libc to do system calls. We do a small amount of setup
 * before jumping into the existing sys_syscall32 path.
 */
#if defined(__lint)

/*ARGSUSED*/
void
sys_syscall_int()
{}

#else	/* __lint */

	ENTRY_NP(brand_sys_syscall_int)
	SWAPGS				/* kernel gsbase */
	XPV_TRAP_POP
	BRAND_CALLBACK(BRAND_CB_INT91, BRAND_URET_FROM_INTR_STACK())
	jmp	nopop_syscall_int

	ALTENTRY(sys_syscall_int)
	SWAPGS				/* kernel gsbase */
	XPV_TRAP_POP

nopop_syscall_int:
	movq	%gs:CPU_THREAD, %r15
	movq	T_STACK(%r15), %rsp	/* switch onto thread's kernel stack */
	movl	%eax, %eax		/* zero-extend the syscall number */
	/*
	 * Set t_post_sys on this thread to force ourselves out via the slow
	 * path. It might be possible at some later date to optimize this out
	 * and use a faster return mechanism.
	 */
	movb	$1, T_POST_SYS(%r15)
	CLEAN_CS
	jmp	_syscall32_save		/* join common 32-bit syscall path */
	/*
	 * There should be no instructions between this label and SWAPGS/IRET
	 * or we could end up breaking branded zone support. See the usage of
	 * this label in lx_brand_int80_callback and sn1_brand_int91_callback
	 * for examples.
	 */
	ALTENTRY(sys_sysint_swapgs_iret)
	SWAPGS				/* user gsbase */
	IRET
	/*NOTREACHED*/
	SET_SIZE(sys_sysint_swapgs_iret)
	SET_SIZE(sys_syscall_int)
	SET_SIZE(brand_sys_syscall_int)

#endif	/* __lint */

/*
 * Legacy 32-bit applications and old libc implementations do lcalls;
 * we should never get here because the LDT entry containing the syscall
 * segment descriptor has the "segment present" bit cleared, which means
 * we end up processing those system calls in trap() via a not-present trap.
 *
 * We do it this way because a call gate unhelpfully does -nothing- to the
 * interrupt flag bit, so an interrupt can run us just after the lcall
 * completes, but just before the swapgs takes effect. Thus the INTR_PUSH and
 * INTR_POP paths would have to be slightly more complex to dance around
 * this problem, and end up depending explicitly on the first
 * instruction of this handler being either swapgs or cli.
 */

#if defined(__lint)

/*ARGSUSED*/
void
sys_lcall32()
{}

#else	/* __lint */

	/*
	 * Dead-end handler: lcall-based system calls are processed in trap()
	 * via a not-present fault (see block comment above), so arriving here
	 * indicates a kernel bug — panic with a diagnostic message.
	 */
	ENTRY_NP(sys_lcall32)
	SWAPGS				/* kernel gsbase */
	pushq	$0			/* NULL return address — presumably terminates stack walks; confirm */
	pushq	%rbp			/* build a frame so the panic stack is walkable */
	movq	%rsp, %rbp
	leaq	__lcall_panic_str(%rip), %rdi	/* arg0: panic format string */
	xorl	%eax, %eax		/* %al = 0: no SSE args in varargs call (amd64 ABI) */
	call	panic
	SET_SIZE(sys_lcall32)

__lcall_panic_str:
	.string	"sys_lcall32: shouldn't be here!"

/*
 * Declare a uintptr_t which covers the entire pc range of syscall
 * handlers for the stack walkers that need this.
 */
	.align	CPTRSIZE
	.globl	_allsyscalls_size
	.type	_allsyscalls_size, @object
_allsyscalls_size:
	.NWORD	. - _allsyscalls
	SET_SIZE(_allsyscalls_size)

#endif	/* __lint */

/*
 * These are the thread context handlers for lwps using sysenter/sysexit.
 */

#if defined(__lint)

/*ARGSUSED*/
void
sep_save(void *ksp)
{}

/*ARGSUSED*/
void
sep_restore(void *ksp)
{}

#else	/* __lint */

	/*
	 * setting this value to zero as we switch away causes the
	 * stack-pointer-on-sysenter to be NULL, ensuring that we
	 * don't silently corrupt another (preempted) thread stack
	 * when running an lwp that (somehow) didn't get sep_restore'd
	 */
	ENTRY_NP(sep_save)
	xorl	%edx, %edx		/* wrmsr writes %edx:%eax; zero both halves */
	xorl	%eax, %eax
	movl	$MSR_INTC_SEP_ESP, %ecx	/* target: sysenter stack-pointer MSR */
	wrmsr
	ret				/* ksp argument intentionally unused here */
	SET_SIZE(sep_save)

/*
 * Update the kernel stack pointer as we resume onto this cpu.
 */
	ENTRY_NP(sep_restore)
	/*
	 * wrmsr takes the 64-bit value in %edx:%eax and the MSR index in
	 * %ecx; split the kernel stack pointer passed in %rdi (arg0) into
	 * its high and low 32-bit halves accordingly.
	 */
	movq	%rdi, %rdx
	shrq	$32, %rdx		/* %edx = high 32 bits of ksp */
	movl	%edi, %eax		/* %eax = low 32 bits of ksp */
	movl	$MSR_INTC_SEP_ESP, %ecx	/* target: sysenter stack-pointer MSR */
	wrmsr
	ret
	SET_SIZE(sep_restore)

#endif	/* __lint */