Lines Matching +full:ia32 +full:- +full:3 +full:a
1 // SPDX-License-Identifier: GPL-2.0-only
8 * X86-64 port
11 * CPU hotplug support - ashok.raj@intel.com
15 * This file handles the architecture-dependent parts of process handling.
51 #include <asm/ia32.h>
79 if (regs->orig_ax != -1) in __show_regs()
80 pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); in __show_regs()
85 log_lvl, regs->ax, regs->bx, regs->cx); in __show_regs()
87 log_lvl, regs->dx, regs->si, regs->di); in __show_regs()
89 log_lvl, regs->bp, regs->r8, regs->r9); in __show_regs()
91 log_lvl, regs->r10, regs->r11, regs->r12); in __show_regs()
93 log_lvl, regs->r13, regs->r14, regs->r15); in __show_regs()
123 log_lvl, regs->cs, ds, es, cr0); in __show_regs()
130 get_debugreg(d3, 3); in __show_regs()
134 /* Only print out debug registers if they are in their non-default state. */ in __show_regs()
149 WARN_ON(dead_task->mm); in release_thread()
159 * traced or probed then any access to a per CPU variable happens with
175 * - For events that occur in ring 3, FRED event delivery swaps in __rdgsbase_inactive()
177 * - ERETU (the FRED transition that returns to ring 3) also swaps in __rdgsbase_inactive()
180 * And the operating system can still set up the GS segment for a in __rdgsbase_inactive()
181 * user thread without needing to load a user thread GS with: in __rdgsbase_inactive()
182 * - Using LKGS, available with FRED, to modify other attributes in __rdgsbase_inactive()
185 * - Accessing the GS segment base address for a user thread as in __rdgsbase_inactive()
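The fragments above are from the comment in __rdgsbase_inactive() explaining why SWAPGS is neither needed nor allowed under FRED. As a condensed, hedged sketch of the logic that comment describes (the helper names are the kernel's own, but lockdep and instrumentation annotations are omitted, so treat this as an approximation rather than the exact function):

static unsigned long __rdgsbase_inactive(void)
{
	unsigned long gsbase;

	if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
	    !cpu_feature_enabled(X86_FEATURE_XENPV)) {
		/* Swap the inactive (user) GS base in, read it, swap back. */
		native_swapgs();
		gsbase = rdgsbase();
		native_swapgs();
	} else {
		/* FRED and Xen PV: the inactive base lives in the MSR. */
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}

	return gsbase;
}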
208 * traced or probed then any access to a per CPU variable happens with
232 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
243 * be the pre-existing saved base or it could be zero. On AMD in save_base_legacy()
248 * context switch between 64-bit programs), and avoiding in save_base_legacy()
249 * the RDMSR helps a lot, so we just assume that whatever in save_base_legacy()
253 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we in save_base_legacy()
259 * If the selector is 1, 2, or 3, then the base is zero on in save_base_legacy()
265 * If selector > 3, then it refers to a real segment, and in save_base_legacy()
269 prev_p->thread.fsbase = 0; in save_base_legacy()
271 prev_p->thread.gsbase = 0; in save_base_legacy()
277 savesegment(fs, task->thread.fsindex); in save_fsgs()
278 savesegment(gs, task->thread.gsindex); in save_fsgs()
285 task->thread.fsbase = rdfsbase(); in save_fsgs()
286 task->thread.gsbase = __rdgsbase_inactive(); in save_fsgs()
288 save_base_legacy(task, task->thread.fsindex, FS); in save_fsgs()
289 save_base_legacy(task, task->thread.gsindex, GS); in save_fsgs()
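save_fsgs() above reads the live FS/GS base with RDFSBASE/RDGSBASE when the CPU and kernel support FSGSBASE. Those instructions are exposed to user space as well (Linux 5.9+ with CR4.FSGSBASE set); a small illustrative program, assuming GCC/Clang with -mfsgsbase, that compares the instruction against the ARCH_GET_FS arch_prctl (on kernels without FSGSBASE enabled the instruction raises SIGILL):

#include <stdio.h>
#include <immintrin.h>      /* _readfsbase_u64(), build with -mfsgsbase */
#include <asm/prctl.h>      /* ARCH_GET_FS */
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long via_insn = _readfsbase_u64();   /* RDFSBASE */
	unsigned long via_prctl = 0;

	/* Ask the kernel for the same value through arch_prctl(2). */
	syscall(SYS_arch_prctl, ARCH_GET_FS, &via_prctl);

	printf("fsbase: rdfsbase=%#lx arch_prctl=%#lx\n", via_insn, via_prctl);
	return 0;
}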
294 * While a process is running, current->thread.fsbase and current->thread.gsbase
325 if (likely(next_index <= 3)) { in load_seg_legacy()
327 * The next task is using 64-bit TLS, is not using this in load_seg_legacy()
349 * Intel-style CPUs.) in load_seg_legacy()
362 * The next task is using a real segment. Loading the selector in load_seg_legacy()
371 * is not XSTATE managed on context switch because that would require a
382 prev->pkru = rdpkru(); in x86_pkru_load()
388 if (prev->pkru != next->pkru) in x86_pkru_load()
389 wrpkru(next->pkru); in x86_pkru_load()
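x86_pkru_load() above switches PKRU eagerly because the register is per thread and must already be correct when the kernel copies to or from user memory. The same register is what the user-space memory protection keys API manipulates; a minimal sketch using the glibc wrappers (glibc 2.27+, most error handling omitted; pkey_alloc() fails on CPUs or kernels without protection-key support):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Allocating a key reserves two bits of PKRU for this process. */
	int pkey = pkey_alloc(0, 0);
	if (pkey < 0) {
		perror("pkey_alloc");
		return 1;
	}

	/* Change this thread's access rights for the key (a PKRU write). */
	pkey_set(pkey, PKEY_DISABLE_WRITE);
	printf("pkey %d rights: %d\n", pkey, pkey_get(pkey));

	pkey_free(pkey);
	return 0;
}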
397 if (unlikely(prev->fsindex || next->fsindex)) in x86_fsgsbase_load()
398 loadseg(FS, next->fsindex); in x86_fsgsbase_load()
399 if (unlikely(prev->gsindex || next->gsindex)) in x86_fsgsbase_load()
400 loadseg(GS, next->gsindex); in x86_fsgsbase_load()
403 wrfsbase(next->fsbase); in x86_fsgsbase_load()
404 __wrgsbase_inactive(next->gsbase); in x86_fsgsbase_load()
406 load_seg_legacy(prev->fsindex, prev->fsbase, in x86_fsgsbase_load()
407 next->fsindex, next->fsbase, FS); in x86_fsgsbase_load()
408 load_seg_legacy(prev->gsindex, prev->gsbase, in x86_fsgsbase_load()
409 next->gsindex, next->gsbase, GS); in x86_fsgsbase_load()
416 unsigned short idx = selector >> 3; in x86_fsgsbase_read_task()
430 idx -= GDT_ENTRY_TLS_MIN; in x86_fsgsbase_read_task()
431 base = get_desc_base(&task->thread.tls_array[idx]); in x86_fsgsbase_read_task()
438 * with RCU. This is a slow path, though, so we can just in x86_fsgsbase_read_task()
441 mutex_lock(&task->mm->context.lock); in x86_fsgsbase_read_task()
442 ldt = task->mm->context.ldt; in x86_fsgsbase_read_task()
443 if (unlikely(!ldt || idx >= ldt->nr_entries)) in x86_fsgsbase_read_task()
446 base = get_desc_base(ldt->entries + idx); in x86_fsgsbase_read_task()
447 mutex_unlock(&task->mm->context.lock); in x86_fsgsbase_read_task()
493 (task->thread.fsindex == 0)) in x86_fsbase_read_task()
494 fsbase = task->thread.fsbase; in x86_fsbase_read_task()
496 fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); in x86_fsbase_read_task()
508 (task->thread.gsindex == 0)) in x86_gsbase_read_task()
509 gsbase = task->thread.gsbase; in x86_gsbase_read_task()
511 gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); in x86_gsbase_read_task()
520 task->thread.fsbase = fsbase; in x86_fsbase_write_task()
527 task->thread.gsbase = gsbase; in x86_gsbase_write_task()
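x86_fsbase_read_task() and x86_gsbase_read_task() are what a tracer ends up in when it asks for another thread's FS/GS base. One user-visible route is ptrace() with the fs_base/gs_base fields of user_regs_struct; a rough, illustrative sketch (error handling omitted):

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);                 /* wait to be inspected */
		_exit(0);
	}

	waitpid(child, NULL, 0);                /* child is now stopped */

	/* fs_base/gs_base are filled from x86_fsbase/gsbase_read_task(). */
	struct user_regs_struct regs;
	ptrace(PTRACE_GETREGS, child, NULL, &regs);
	printf("child fs_base=%#llx gs_base=%#llx\n", regs.fs_base, regs.gs_base);

	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}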
550 regs->ip = new_ip; in start_thread_common()
551 regs->sp = new_sp; in start_thread_common()
552 regs->csx = _cs; in start_thread_common()
553 regs->ssx = _ss; in start_thread_common()
555 * Allow single-step trap and NMI when starting a new task, thus in start_thread_common()
556 * once the new task enters user space, single-step trap and NMI in start_thread_common()
559 * Entering a new task is logically speaking a return from a in start_thread_common()
561 * enables single stepping, a single step exception should be in start_thread_common()
568 * Paranoia: High-order 48 bits above the lowest 16 bit SS are in start_thread_common()
575 regs->fred_ss.swevent = true; in start_thread_common()
576 regs->fred_ss.nmi = true; in start_thread_common()
579 regs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; in start_thread_common()
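The lines above are from start_thread_common(), which seeds the new task's registers on exec. For an ordinary 64-bit execve, the start_thread() wrapper simply supplies the user code and data selectors; sketched here from the surrounding file (treat the exact signature as an approximation):

void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0);
}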
603 * - fold all the options into a flag word and test it with a single test.
604 * - could test fs/gs bitsliced
613 struct thread_struct *prev = &prev_p->thread; in __switch_to()
614 struct thread_struct *next = &next_p->thread; in __switch_to()
656 savesegment(es, prev->es); in __switch_to()
657 if (unlikely(next->es | prev->es)) in __switch_to()
658 loadsegment(es, next->es); in __switch_to()
660 savesegment(ds, prev->ds); in __switch_to()
661 if (unlikely(next->ds | prev->ds)) in __switch_to()
662 loadsegment(ds, next->ds); in __switch_to()
681 * AMD CPUs have a misfeature: SYSRET sets the SS selector but in __switch_to()
682 * does not update the cached descriptor. As a result, if we in __switch_to()
690 * selectors at every context switch. SYSCALL sets up a valid in __switch_to()
691 * SS, so the only way to get NULL is to re-enter the kernel in __switch_to()
692 * from CPL 3 through an interrupt. Since that can't happen in __switch_to()
693 * in the same task as a running syscall, we are guaranteed to in __switch_to()
694 * context switch between every interrupt vector entry and a in __switch_to()
699 * it previously had a different non-NULL value. in __switch_to()
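The comment above describes the X86_BUG_SYSRET_SS_ATTRS quirk; the corresponding workaround in __switch_to() boils down to reloading SS with __KERNEL_DS whenever the cached descriptor may be stale. A condensed sketch of that step (kernel helpers as named in the file, surrounding context omitted):

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		unsigned short ss_sel;

		/* Refresh the cached SS descriptor clobbered by SYSRET. */
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}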
723 /* Pretend that this comes from a 64bit execve */ in set_personality_64bit()
724 task_pt_regs(current)->orig_ax = __NR_execve; in set_personality_64bit()
725 current_thread_info()->status &= ~TS_COMPAT; in set_personality_64bit()
726 if (current->mm) in set_personality_64bit()
727 __set_bit(MM_CONTEXT_HAS_VSYSCALL, &current->mm->context.flags); in set_personality_64bit()
733 current->personality &= ~READ_IMPLIES_EXEC; in set_personality_64bit()
739 if (current->mm) in __set_personality_x32()
740 current->mm->context.flags = 0; in __set_personality_x32()
742 current->personality &= ~READ_IMPLIES_EXEC; in __set_personality_x32()
749 * Pretend to come from an x32 execve. in __set_personality_x32()
751 task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; in __set_personality_x32()
752 current_thread_info()->status &= ~TS_COMPAT; in __set_personality_x32()
759 if (current->mm) { in __set_personality_ia32()
764 __set_bit(MM_CONTEXT_UPROBE_IA32, &current->mm->context.flags); in __set_personality_ia32()
767 current->personality |= force_personality32; in __set_personality_ia32()
769 task_pt_regs(current)->orig_ax = __NR_ia32_execve; in __set_personality_ia32()
770 current_thread_info()->status |= TS_COMPAT; in __set_personality_ia32()
795 return (long)image->size; in prctl_map_vdso()
817 mm->context.lam_cr3_mask = X86_CR3_LAM_U57; in mm_enable_lam()
818 mm->context.untag_mask = ~GENMASK(62, 57); in mm_enable_lam()
821 * Even though the process must still be single-threaded at this in mm_enable_lam()
826 set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); in mm_enable_lam()
832 return -ENODEV; in prctl_enable_tagged_addr()
835 if (current->mm != mm) in prctl_enable_tagged_addr()
836 return -EINVAL; in prctl_enable_tagged_addr()
839 !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags)) in prctl_enable_tagged_addr()
840 return -EINVAL; in prctl_enable_tagged_addr()
843 return -EINTR; in prctl_enable_tagged_addr()
849 if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { in prctl_enable_tagged_addr()
851 return -EBUSY; in prctl_enable_tagged_addr()
856 return -EINVAL; in prctl_enable_tagged_addr()
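prctl_enable_tagged_addr() above is reached through arch_prctl(ARCH_ENABLE_TAGGED_ADDR, nr_tag_bits); with LAM_U57 the CPU then ignores pointer bits 62:57 on loads and stores, matching the untag mask set in mm_enable_lam(). A hedged user-space sketch (the request names come from <asm/prctl.h> on LAM-capable kernels; the call fails where LAM is unavailable):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <asm/prctl.h>      /* ARCH_ENABLE_TAGGED_ADDR, ARCH_GET_UNTAG_MASK */
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Ask for 6 tag bits; only LAM_U57 (bits 62:57) is supported. */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6UL)) {
		perror("ARCH_ENABLE_TAGGED_ADDR");   /* no LAM on this CPU/kernel */
		return 1;
	}

	unsigned long untag_mask = 0;
	syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &untag_mask);
	printf("untag mask: %#lx\n", untag_mask);    /* ~GENMASK(62, 57) */

	/* A pointer carrying a tag in bits 62:57 still dereferences normally. */
	int *p = malloc(sizeof(*p));
	int *tagged = (int *)((uintptr_t)p | (0x2aUL << 57));
	*tagged = 42;
	printf("*tagged = %d\n", *tagged);

	free(p);
	return 0;
}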
874 return -EPERM; in do_arch_prctl_64()
888 * On non-FSGSBASE systems, save_base_legacy() expects in do_arch_prctl_64()
891 task->thread.gsbase = arg2; in do_arch_prctl_64()
894 task->thread.gsindex = 0; in do_arch_prctl_64()
906 return -EPERM; in do_arch_prctl_64()
918 * On non-FSGSBASE systems, save_base_legacy() expects in do_arch_prctl_64()
921 task->thread.fsbase = arg2; in do_arch_prctl_64()
923 task->thread.fsindex = 0; in do_arch_prctl_64()
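The ARCH_SET_GS/ARCH_SET_FS cases above are the kernel side of arch_prctl(2). A minimal illustrative use from user space (the request names are from <asm/prctl.h>; pointing GS at private data is safe because 64-bit user code does not otherwise use GS):

#include <stdio.h>
#include <asm/prctl.h>      /* ARCH_SET_GS, ARCH_GET_GS */
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long gsbase = 0;
	static unsigned long scratch[4];

	/* Point the GS base at some per-thread data of our own. */
	syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)scratch);
	syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase);
	printf("gsbase=%#lx scratch=%p\n", gsbase, (void *)scratch);

	/* A %gs-relative store now lands in scratch[0]. */
	asm volatile("movq $42, %%gs:0" ::: "memory");
	printf("scratch[0]=%lu\n", scratch[0]);
	return 0;
}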
956 return put_user(task->mm->context.untag_mask, in do_arch_prctl_64()
959 return prctl_enable_tagged_addr(task->mm, arg2); in do_arch_prctl_64()
962 return -EINVAL; in do_arch_prctl_64()
963 set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags); in do_arch_prctl_64()
978 ret = -EINVAL; in do_arch_prctl_64()