| /linux/kernel/ |
| H A D | delayacct.c | 126 current->delays->blkio_start = local_clock(); in __delayacct_blkio_start() 210 current->delays->freepages_start = local_clock(); in __delayacct_freepages_start() 215 delayacct_end(&current->delays->lock, in __delayacct_freepages_end() 216 &current->delays->freepages_start, in __delayacct_freepages_end() 217 &current->delays->freepages_delay, in __delayacct_freepages_end() 218 &current->delays->freepages_count, in __delayacct_freepages_end() 219 &current->delays->freepages_delay_max, in __delayacct_freepages_end() 220 &current->delays->freepages_delay_min); in __delayacct_freepages_end() 225 *in_thrashing = !!current->in_thrashing; in __delayacct_thrashing_start() 229 current->in_thrashing = 1; in __delayacct_thrashing_start() [all …]
|
| H A D | rseq.c | 303 if (current->rseq_sig != sig) { in rseq_get_rseq_cs() 306 sig, current->rseq_sig, current->pid, usig); in rseq_get_rseq_cs() 386 struct task_struct *t = current; in rseq_ip_fixup() 426 struct task_struct *t = current; in __rseq_handle_notify_resume() 460 struct task_struct *t = current; in rseq_syscall() 484 if (current->rseq != rseq || !current->rseq) in SYSCALL_DEFINE4() 486 if (rseq_len != current->rseq_len) in SYSCALL_DEFINE4() 488 if (current->rseq_sig != sig) in SYSCALL_DEFINE4() 490 ret = rseq_reset_rseq_cpu_node_id(current); in SYSCALL_DEFINE4() 493 current->rseq = NULL; in SYSCALL_DEFINE4() [all …]
|
| H A D | signal.c | 179 if (!recalc_sigpending_tsk(current) && !freezing(current)) { in recalc_sigpending() 191 spin_lock_irq(&current->sighand->siglock); in calculate_sigpending() 192 set_tsk_thread_flag(current, TIF_SIGPENDING); in calculate_sigpending() 194 spin_unlock_irq(&current->sighand->siglock); in calculate_sigpending() 260 current->comm, current->pid, sig); in print_dropped_signal() 389 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK; in task_join_group_stop() 390 struct signal_struct *sig = current->signal; in task_join_group_stop() 620 struct task_struct *tsk = current; in dequeue_signal() 656 current->jobctl |= JOBCTL_STOP_DEQUEUED; in dequeue_signal() 670 struct task_struct *tsk = current; in dequeue_synchronous_signal() [all …]
|
| H A D | acct.c | 220 struct pid_namespace *ns = task_active_pid_ns(current); in acct_on() 308 pin_kill(task_active_pid_ns(current)->bacct); in SYSCALL_DEFINE1() 434 struct pacct_struct *pacct = &current->signal->pacct; in fill_ac() 460 strscpy(ac->ac_comm, current->comm, sizeof(ac->ac_comm)); in fill_ac() 464 run_time -= current->group_leader->start_time; in fill_ac() 489 spin_lock_irq(&current->sighand->siglock); in fill_ac() 490 tty = current->signal->tty; /* Safe as we hold the siglock */ in fill_ac() 499 spin_unlock_irq(&current->sighand->siglock); in fill_ac() 512 ac->ac_pid = task_tgid_nr_ns(current, ns); in fill_ac() 514 ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns); in fill_ac() [all …]
|
| /linux/arch/sparc/kernel/ |
| H A D | sigutil_32.c | 19 if (test_tsk_thread_flag(current, TIF_USEDFPU)) { in save_fpu_state() 21 fpsave(&current->thread.float_regs[0], &current->thread.fsr, in save_fpu_state() 22 &current->thread.fpqueue[0], &current->thread.fpqdepth); in save_fpu_state() 24 clear_tsk_thread_flag(current, TIF_USEDFPU); in save_fpu_state() 27 if (current == last_task_used_math) { in save_fpu_state() 29 fpsave(&current->thread.float_regs[0], &current->thread.fsr, in save_fpu_state() 30 &current->thread.fpqueue[0], &current->thread.fpqdepth); in save_fpu_state() 36 &current->thread.float_regs[0], in save_fpu_state() 38 err |= __put_user(current->thread.fsr, &fpu->si_fsr); in save_fpu_state() 39 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); in save_fpu_state() [all …]
|
| H A D | traps_32.c | 63 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); in die_if_kernel() 118 send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, current); in do_illegal_instruction() 126 send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, current); in do_priv_instruction() 147 current); in do_memaccess_unaligned() 167 if(last_task_used_math == current) in do_fpd_trap() 175 last_task_used_math = current; in do_fpd_trap() 177 fpload(&current->thread.float_regs[0], &current->thread.fsr); in do_fpd_trap() 188 fpload(&current->thread.float_regs[0], &current->thread.fsr); in do_fpd_trap() 209 struct task_struct *fpt = current; 256 fpload(&current->thread.float_regs[0], &current->thread.fsr); [all …]
|
| /linux/arch/riscv/kernel/ |
| H A D | kernel_mode_vector.c | 23 WRITE_ONCE(current->thread.riscv_v_flags, flags); in riscv_v_flags_set() 86 return &current->thread.riscv_v_flags; in riscv_v_flags_ptr() 116 if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current)) in riscv_v_stop_kernel_context() 119 riscv_preempt_v_clear_dirty(current); in riscv_v_stop_kernel_context() 128 kvstate = &current->thread.kernel_vstate; in riscv_v_start_kernel_context() 132 if (riscv_preempt_v_started(current)) { in riscv_v_start_kernel_context() 136 if (riscv_preempt_v_dirty(current)) { in riscv_v_start_kernel_context() 138 riscv_preempt_v_clear_dirty(current); in riscv_v_start_kernel_context() 140 riscv_preempt_v_set_restore(current); in riscv_v_start_kernel_context() 146 if (__riscv_v_vstate_check(task_pt_regs(current)->status, DIRTY)) { in riscv_v_start_kernel_context() [all …]
|
| /linux/drivers/tty/ |
| H A D | tty_jobctrl.c | 18 return (sigismember(&current->blocked, sig) || in is_ignored() 19 current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); in is_ignored() 39 if (current->signal->tty != tty) in __tty_check_change() 43 pgrp = task_pgrp(current); in __tty_check_change() 109 tty->ctrl.pgrp = get_pid(task_pgrp(current)); in __proc_set_tty() 110 tty->ctrl.session = get_pid(task_session(current)); in __proc_set_tty() 112 if (current->signal->tty) { in __proc_set_tty() 114 current->signal->tty->name); in __proc_set_tty() 115 tty_kref_put(current->signal->tty); in __proc_set_tty() 117 put_pid(current->signal->tty_old_pgrp); in __proc_set_tty() [all …]
|
| /linux/arch/powerpc/kernel/ |
| H A D | signal_32.c | 250 flush_fp_to_thread(current); in prepare_save_user_regs() 252 if (current->thread.used_vr) in prepare_save_user_regs() 253 flush_altivec_to_thread(current); in prepare_save_user_regs() 255 current->thread.vrsave = mfspr(SPRN_VRSAVE); in prepare_save_user_regs() 258 if (current->thread.used_vsr && ctx_has_vsx_region) in prepare_save_user_regs() 259 flush_vsx_to_thread(current); in prepare_save_user_regs() 262 if (current->thread.used_spe) in prepare_save_user_regs() 263 flush_spe_to_thread(current); in prepare_save_user_regs() 278 if (current->thread.used_vr) { in __unsafe_save_user_regs() 279 unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state, in __unsafe_save_user_regs() [all …]
|
| H A D | process.c | 91 if (tsk == current && tsk->thread.regs && in check_if_tm_restore_required() 197 BUG_ON(tsk != current); in flush_fp_to_thread() 213 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { in enable_kernel_fp() 214 check_if_tm_restore_required(current); in enable_kernel_fp() 223 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_fp() 225 __giveup_fpu(current); in enable_kernel_fp() 264 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { in enable_kernel_altivec() 265 check_if_tm_restore_required(current); in enable_kernel_altivec() 274 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_altivec() 276 __giveup_altivec(current); in enable_kernel_altivec() [all …]
|
| H A D | uprobes.c | 66 struct arch_uprobe_task *autask = &current->utask->autask; in arch_uprobe_pre_xol() 68 autask->saved_trap_nr = current->thread.trap_nr; in arch_uprobe_pre_xol() 69 current->thread.trap_nr = UPROBE_TRAP_NR; in arch_uprobe_pre_xol() 70 regs_set_return_ip(regs, current->utask->xol_vaddr); in arch_uprobe_pre_xol() 72 user_enable_single_step(current); in arch_uprobe_pre_xol() 114 struct uprobe_task *utask = current->utask; in arch_uprobe_post_xol() 116 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); in arch_uprobe_post_xol() 118 current->thread.trap_nr = utask->autask.saved_trap_nr; in arch_uprobe_post_xol() 129 user_disable_single_step(current); in arch_uprobe_post_xol() 170 struct uprobe_task *utask = current->utask; in arch_uprobe_abort_xol() [all …]
|
| /linux/kernel/events/ |
| H A D | hw_breakpoint_test.c | 127 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_cpu() 150 fill_bp_slots(test, &idx, -1, current, 0); in test_one_task_on_all_cpus() 151 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_task_on_all_cpus() 152 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx)); in test_one_task_on_all_cpus() 164 fill_bp_slots(test, &idx, -1, current, 0); in test_two_tasks_on_all_cpus() 167 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_two_tasks_on_all_cpus() 169 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx)); in test_two_tasks_on_all_cpus() 181 fill_bp_slots(test, &idx, get_test_cpu(0), current, 0); in test_one_task_on_one_cpu() 182 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_task_on_one_cpu() 183 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx)); in test_one_task_on_one_cpu() [all …]
|
| /linux/tools/testing/selftests/cgroup/ |
| H A D | test_hugetlb_memcg.c | 99 long old_current, expected_current, current; in hugetlb_test_program() 102 old_current = cg_read_long(test_group, "memory.current"); in hugetlb_test_program() 104 current = cg_read_long(test_group, "memory.current"); in hugetlb_test_program() 105 if (current - old_current >= MB(2)) { in hugetlb_test_program() 108 ksft_print_msg("before: %ld, after: %ld\n", old_current, current); in hugetlb_test_program() 117 current = cg_read_long(test_group, "memory.current"); in hugetlb_test_program() 118 if (current - old_current >= MB(2)) { in hugetlb_test_program() 120 ksft_print_msg("before: %ld, after: %ld\n", old_current, current); in hugetlb_test_program() [all …] |
| /linux/arch/m68k/mm/ |
| H A D | fault.c | 29 signo = current->thread.signo; in send_fault_sig() 30 si_code = current->thread.code; in send_fault_sig() 31 addr = (void __user *)current->thread.faddr; in send_fault_sig() 73 struct mm_struct *mm = current->mm; in do_page_fault() 191 current->thread.signo = SIGBUS; in do_page_fault() 192 current->thread.faddr = address; in do_page_fault() 196 current->thread.signo = SIGBUS; in do_page_fault() 197 current->thread.code = BUS_ADRERR; in do_page_fault() 198 current->thread.faddr = address; in do_page_fault() 204 current->thread.signo = SIGSEGV; in do_page_fault() [all …]
|
| /linux/Documentation/hwmon/ |
| H A D | max16601.rst | 83 curr1_input VCORE input current, derived from duty cycle 84 and output current. 85 curr1_max Maximum input current. 89 curr[P+2]_input VCORE phase P input current. 92 curr[N+2]_input VCORE input current, derived from sensor 97 curr[N+3]_input VSA input current. 100 curr[N+4]_input VCORE output current. 101 curr[N+4]_crit Critical output current. 102 curr[N+4]_crit_alarm Output current critical alarm. 103 curr[N+4]_max Maximum output current. [all …]
|
| H A D | ina3221.rst | 21 The Texas Instruments INA3221 monitors voltage, current, and power on the high 23 and supply voltage, with programmable conversion times and averaging, current 35 curr[123]_crit Critical alert current(mA) setting, activates the 36 corresponding alarm when the respective current 38 curr[123]_crit_alarm Critical alert current limit exceeded 39 curr[123]_max Warning alert current(mA) setting, activates the 40 corresponding alarm when the respective current 42 curr[123]_max_alarm Warning alert current limit exceeded 46 curr4_input Sum of current(mA) measurement channels, 49 curr4_crit Critical alert current(mA) setting for sum of current [all …]
|
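The two hwmon documents above (max16601.rst and ina3221.rst) describe per-channel current readings and limits exposed as sysfs attributes in milliamps. A minimal userspace sketch of reading one such attribute follows; the `hwmon0` index and the choice of `curr1_input` are assumptions for illustration, since the actual hwmonN node depends on how the driver enumerates on a given system.

```c
/*
 * Minimal sketch (not from the kernel tree): read one hwmon current
 * attribute such as those listed for max16601/ina3221 above.
 * The path is an assumption; hwmon values for curr*_input are in mA.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/curr1_input";
	FILE *f = fopen(path, "r");
	long curr_ma;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &curr_ma) == 1)
		printf("curr1_input: %ld mA\n", curr_ma);
	fclose(f);
	return 0;
}
```

Limit attributes such as `curr1_crit` are typically writable in the same way, by writing a value in mA to the corresponding sysfs file.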
| /linux/arch/mips/math-emu/ |
| H A D | dsemul.c | 77 mm_context_t *mm_ctx = &current->mm->context; in alloc_emuframe() 116 pr_debug("allocate emuframe %d to %d\n", idx, current->pid); in alloc_emuframe() 128 pr_debug("free emuframe %d from %d\n", idx, current->pid); in free_emuframe() 180 fr_idx = atomic_read(&current->thread.bd_emu_frame); in dsemul_thread_rollback() 193 regs->cp0_epc = current->thread.bd_emu_branch_pc; in dsemul_thread_rollback() 195 regs->cp0_epc = current->thread.bd_emu_cont_pc; in dsemul_thread_rollback() 197 atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE); in dsemul_thread_rollback() 198 free_emuframe(fr_idx, current->mm); in dsemul_thread_rollback() 246 fr_idx = atomic_read(&current->thread.bd_emu_frame); in mips_dsemul() 273 ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr), in mips_dsemul() [all …]
|
| /linux/arch/arm64/kernel/ |
| H A D | fpsimd.c | 363 switch (current->thread.fp_type) { in task_fpsimd_load() 369 if (!thread_sm_enabled(&current->thread)) in task_fpsimd_load() 373 sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1); in task_fpsimd_load() 395 unsigned long sme_vl = task_get_sme_vl(current); in task_fpsimd_load() 401 write_sysreg_s(current->thread.svcr, SYS_SVCR); in task_fpsimd_load() 403 if (thread_za_enabled(&current->thread)) in task_fpsimd_load() 404 sme_load_state(current->thread.sme_state, in task_fpsimd_load() 407 if (thread_sm_enabled(&current->thread)) in task_fpsimd_load() 412 write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR); in task_fpsimd_load() 415 WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE); in task_fpsimd_load() [all …]
|
| /linux/arch/s390/kernel/ |
| H A D | signal.c | 111 save_access_regs(current->thread.acrs); in store_sigregs() 118 restore_access_regs(current->thread.acrs); in load_sigregs() 132 memcpy(&user_sregs.regs.acrs, current->thread.acrs, in save_sigregs() 134 fpregs_store(&user_sregs.fpregs, &current->thread.ufpu); in save_sigregs() 145 current->restart_block.fn = do_no_restart_syscall; in restore_sigregs() 150 if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) in restore_sigregs() 165 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, in restore_sigregs() 166 sizeof(current->thread.acrs)); in restore_sigregs() 168 fpregs_load(&user_sregs.fpregs, &current->thread.ufpu); in restore_sigregs() 184 vxrs[i] = current->thread.ufpu.vxrs[i].low; in save_sigregs_ext() [all …]
|
| /linux/include/linux/sched/ |
| H A D | signal.h | 289 struct task_struct *task = current; in kernel_dequeue_signal() 303 spin_lock_irq(&current->sighand->siglock); in kernel_signal_stop() 304 if (current->jobctl & JOBCTL_STOP_DEQUEUED) { in kernel_signal_stop() 305 current->jobctl |= JOBCTL_STOPPED; in kernel_signal_stop() 308 spin_unlock_irq(&current->sighand->siglock); in kernel_signal_stop() 377 set_tsk_thread_flag(current, TIF_SIGPENDING); in restart_syscall() 428 (fatal_signal_pending(current) || in fault_signal_pending() 429 (user_mode(regs) && signal_pending(current)))); in fault_signal_pending() 513 current->restore_sigmask = true; in set_restore_sigmask() 521 current->restore_sigmask = false; in clear_restore_sigmask() [all …]
|
| /linux/arch/um/kernel/ |
| H A D | process.c | 77 arch_switch_to(current); in __switch_to() 79 return current->thread.prev_sched; in __switch_to() 84 struct pt_regs *regs = &current->thread.regs; in interrupt_end() 101 return task_pid_nr(current); in get_current_pid() 113 if (current->thread.prev_sched != NULL) in new_thread_handler() 114 schedule_tail(current->thread.prev_sched); in new_thread_handler() 115 current->thread.prev_sched = NULL; in new_thread_handler() 117 fn = current->thread.request.thread.proc; in new_thread_handler() 118 arg = current->thread.request.thread.arg; in new_thread_handler() 124 userspace(&current->thread.regs.regs); in new_thread_handler() [all …]
|
| /linux/arch/csky/kernel/probes/ |
| H A D | uprobes.c | 50 struct uprobe_task *utask = current->utask; in arch_uprobe_pre_xol() 52 utask->autask.saved_trap_no = current->thread.trap_no; in arch_uprobe_pre_xol() 53 current->thread.trap_no = UPROBE_TRAP_NR; in arch_uprobe_pre_xol() 57 user_enable_single_step(current); in arch_uprobe_pre_xol() 64 struct uprobe_task *utask = current->utask; in arch_uprobe_post_xol() 66 WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); in arch_uprobe_post_xol() 67 current->thread.trap_no = utask->autask.saved_trap_no; in arch_uprobe_post_xol() 71 user_disable_single_step(current); in arch_uprobe_post_xol() 103 struct uprobe_task *utask = current->utask; in arch_uprobe_abort_xol() 105 current->thread.trap_no = utask->autask.saved_trap_no; in arch_uprobe_abort_xol() [all …]
|
| /linux/tools/iio/ |
| H A D | iio_utils.c | 31 char *current; in iioutils_break_up_name() local 43 current = strdup(full_name + strlen(prefix) + 1); in iioutils_break_up_name() 44 if (!current) in iioutils_break_up_name() 47 working = strtok(current, "_\0"); in iioutils_break_up_name() 49 free(current); in iioutils_break_up_name() 66 free(current); in iioutils_break_up_name() 321 struct iio_channel_info *current; in build_channel_array() local 385 current = &(*ci_array)[count++]; in build_channel_array() 421 current->scale = 1.0; in build_channel_array() 422 current->offset = 0; in build_channel_array() [all …]
|
| /linux/arch/powerpc/include/asm/book3s/32/ |
| H A D | kup.h | 60 unsigned long kuap = current->thread.kuap; in __kuap_save_and_lock() 66 current->thread.kuap = KUAP_NONE; in __kuap_save_and_lock() 78 current->thread.kuap = KUAP_NONE; in __kuap_kernel_restore() 85 current->thread.kuap = regs->kuap; in __kuap_kernel_restore() 92 unsigned long kuap = current->thread.kuap; in __kuap_get_and_assert_locked() 108 current->thread.kuap = (__force u32)to; in allow_user_access() 114 u32 kuap = current->thread.kuap; in prevent_user_access() 121 current->thread.kuap = KUAP_NONE; in prevent_user_access() 127 unsigned long flags = current->thread.kuap; in prevent_user_access_return() 130 current->thread.kuap = KUAP_NONE; in prevent_user_access_return() [all …]
|
| /linux/include/linux/ |
| H A D | vtime.h | 52 vtime_account_kernel(current); in vtime_account_guest_enter() 53 current->flags |= PF_VCPU; in vtime_account_guest_enter() 58 vtime_account_kernel(current); in vtime_account_guest_exit() 59 current->flags &= ~PF_VCPU; in vtime_account_guest_exit() 96 vtime_guest_enter(current); in vtime_account_guest_enter() 98 current->flags |= PF_VCPU; in vtime_account_guest_enter() 104 vtime_guest_exit(current); in vtime_account_guest_exit() 106 current->flags &= ~PF_VCPU; in vtime_account_guest_exit() 116 current->flags |= PF_VCPU; in vtime_account_guest_enter() 121 current->flags &= ~PF_VCPU; in vtime_account_guest_exit()
|