| /linux/kernel/ |
| H A D | delayacct.c | 126 current->delays->blkio_start = local_clock(); in __delayacct_blkio_start() 210 current->delays->freepages_start = local_clock(); in __delayacct_freepages_start() 215 delayacct_end(&current->delays->lock, in __delayacct_freepages_end() 216 &current->delays->freepages_start, in __delayacct_freepages_end() 217 &current->delays->freepages_delay, in __delayacct_freepages_end() 218 &current->delays->freepages_count, in __delayacct_freepages_end() 219 &current->delays->freepages_delay_max, in __delayacct_freepages_end() 220 &current->delays->freepages_delay_min); in __delayacct_freepages_end() 225 *in_thrashing = !!current->in_thrashing; in __delayacct_thrashing_start() 229 current->in_thrashing = 1; in __delayacct_thrashing_start() [all …]
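The delayacct.c hits above follow a simple begin/end accounting pattern: a __delayacct_*_start() helper records a timestamp with local_clock(), and delayacct_end() later accumulates the elapsed time along with a count and running max/min under the task's delay lock. A minimal userspace sketch of the same bookkeeping, with illustrative names and clock_gettime() standing in for local_clock(), might look like this:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the per-task delay fields seen above. */
struct delay_stats {
	uint64_t start;    /* ns timestamp taken by delay_start() */
	uint64_t total;    /* accumulated delay in ns */
	uint64_t count;    /* number of completed intervals */
	uint64_t max, min; /* extremes; min == 0 means "unset" */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void delay_start(struct delay_stats *d)
{
	d->start = now_ns();
}

static void delay_end(struct delay_stats *d)
{
	uint64_t delta = now_ns() - d->start;

	d->total += delta;
	d->count++;
	if (delta > d->max)
		d->max = delta;
	if (d->min == 0 || delta < d->min)
		d->min = delta;
}

int main(void)
{
	struct delay_stats d = { 0 };

	delay_start(&d);
	/* ... the operation being accounted, e.g. blocking I/O ... */
	delay_end(&d);
	printf("count=%llu total=%llu ns\n",
	       (unsigned long long)d.count, (unsigned long long)d.total);
	return 0;
}
```

The kernel version additionally serializes the update with a raw spinlock, which is why delayacct_end() is passed &current->delays->lock alongside the counters.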
|
| H A D | acct.c | 220 struct pid_namespace *ns = task_active_pid_ns(current); in acct_on() 308 pin_kill(task_active_pid_ns(current)->bacct); in SYSCALL_DEFINE1() 434 struct pacct_struct *pacct = &current->signal->pacct; in fill_ac() 460 strscpy(ac->ac_comm, current->comm, sizeof(ac->ac_comm)); in fill_ac() 464 run_time -= current->group_leader->start_time; in fill_ac() 489 spin_lock_irq(&current->sighand->siglock); in fill_ac() 490 tty = current->signal->tty; /* Safe as we hold the siglock */ in fill_ac() 499 spin_unlock_irq(&current->sighand->siglock); in fill_ac() 512 ac->ac_pid = task_tgid_nr_ns(current, ns); in fill_ac() 514 ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns); in fill_ac() [all …]
|
| /linux/arch/sparc/kernel/ |
| H A D | sigutil_32.c | 19 if (test_tsk_thread_flag(current, TIF_USEDFPU)) { in save_fpu_state() 21 fpsave(&current->thread.float_regs[0], &current->thread.fsr, in save_fpu_state() 22 &current->thread.fpqueue[0], &current->thread.fpqdepth); in save_fpu_state() 24 clear_tsk_thread_flag(current, TIF_USEDFPU); in save_fpu_state() 27 if (current == last_task_used_math) { in save_fpu_state() 29 fpsave(&current->thread.float_regs[0], &current->thread.fsr, in save_fpu_state() 30 &current->thread.fpqueue[0], &current->thread.fpqdepth); in save_fpu_state() 36 &current->thread.float_regs[0], in save_fpu_state() 38 err |= __put_user(current->thread.fsr, &fpu->si_fsr); in save_fpu_state() 39 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); in save_fpu_state() [all …]
|
| H A D | traps_32.c | 63 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); in die_if_kernel() 118 send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, current); in do_illegal_instruction() 126 send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, current); in do_priv_instruction() 147 current); in do_memaccess_unaligned() 167 if(last_task_used_math == current) in do_fpd_trap() 175 last_task_used_math = current; in do_fpd_trap() 177 fpload(&current->thread.float_regs[0], &current->thread.fsr); in do_fpd_trap() 188 fpload(&current->thread.float_regs[0], &current->thread.fsr); in do_fpd_trap() 209 struct task_struct *fpt = current; 256 fpload(&current->thread.float_regs[0], &current->thread.fsr); [all …]
|
| /linux/drivers/tty/ |
| H A D | tty_jobctrl.c | 18 return (sigismember(&current->blocked, sig) || in is_ignored() 19 current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); in is_ignored() 39 if (current->signal->tty != tty) in __tty_check_change() 43 pgrp = task_pgrp(current); in __tty_check_change() 109 tty->ctrl.pgrp = get_pid(task_pgrp(current)); in __proc_set_tty() 110 tty->ctrl.session = get_pid(task_session(current)); in __proc_set_tty() 112 if (current->signal->tty) { in __proc_set_tty() 114 current->signal->tty->name); in __proc_set_tty() 115 tty_kref_put(current->signal->tty); in __proc_set_tty() 117 put_pid(current->signal->tty_old_pgrp); in __proc_set_tty() [all …]
|
| /linux/arch/riscv/kernel/ |
| H A D | kernel_mode_vector.c | 23 WRITE_ONCE(current->thread.riscv_v_flags, flags); in riscv_v_flags_set() 86 return &current->thread.riscv_v_flags; in riscv_v_flags_ptr() 116 if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current)) in riscv_v_stop_kernel_context() 119 riscv_preempt_v_clear_dirty(current); in riscv_v_stop_kernel_context() 128 kvstate = &current->thread.kernel_vstate; in riscv_v_start_kernel_context() 132 if (riscv_preempt_v_started(current)) { in riscv_v_start_kernel_context() 136 if (riscv_preempt_v_dirty(current)) { in riscv_v_start_kernel_context() 138 riscv_preempt_v_clear_dirty(current); in riscv_v_start_kernel_context() 140 riscv_preempt_v_set_restore(current); in riscv_v_start_kernel_context() 146 if (__riscv_v_vstate_check(task_pt_regs(current)->status, DIRTY)) { in riscv_v_start_kernel_context() [all …]
|
| /linux/arch/powerpc/kernel/ |
| H A D | signal_32.c | 250 flush_fp_to_thread(current); in prepare_save_user_regs() 252 if (current->thread.used_vr) in prepare_save_user_regs() 253 flush_altivec_to_thread(current); in prepare_save_user_regs() 255 current->thread.vrsave = mfspr(SPRN_VRSAVE); in prepare_save_user_regs() 258 if (current->thread.used_vsr && ctx_has_vsx_region) in prepare_save_user_regs() 259 flush_vsx_to_thread(current); in prepare_save_user_regs() 262 if (current->thread.used_spe) in prepare_save_user_regs() 263 flush_spe_to_thread(current); in prepare_save_user_regs() 278 if (current->thread.used_vr) { in __unsafe_save_user_regs() 279 unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state, in __unsafe_save_user_regs() [all …]
|
| H A D | process.c | 91 if (tsk == current && tsk->thread.regs && in check_if_tm_restore_required() 197 BUG_ON(tsk != current); in flush_fp_to_thread() 213 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { in enable_kernel_fp() 214 check_if_tm_restore_required(current); in enable_kernel_fp() 223 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_fp() 225 __giveup_fpu(current); in enable_kernel_fp() 264 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { in enable_kernel_altivec() 265 check_if_tm_restore_required(current); in enable_kernel_altivec() 274 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_altivec() 276 __giveup_altivec(current); in enable_kernel_altivec() [all …]
|
| H A D | uprobes.c | 66 struct arch_uprobe_task *autask = &current->utask->autask; in arch_uprobe_pre_xol() 68 autask->saved_trap_nr = current->thread.trap_nr; in arch_uprobe_pre_xol() 69 current->thread.trap_nr = UPROBE_TRAP_NR; in arch_uprobe_pre_xol() 70 regs_set_return_ip(regs, current->utask->xol_vaddr); in arch_uprobe_pre_xol() 72 user_enable_single_step(current); in arch_uprobe_pre_xol() 114 struct uprobe_task *utask = current->utask; in arch_uprobe_post_xol() 116 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); in arch_uprobe_post_xol() 118 current->thread.trap_nr = utask->autask.saved_trap_nr; in arch_uprobe_post_xol() 129 user_disable_single_step(current); in arch_uprobe_post_xol() 170 struct uprobe_task *utask = current->utask; in arch_uprobe_abort_xol() [all …]
|
| /linux/kernel/events/ |
| H A D | hw_breakpoint_test.c | 127 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_cpu() 150 fill_bp_slots(test, &idx, -1, current, 0); in test_one_task_on_all_cpus() 151 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_task_on_all_cpus() 152 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx)); in test_one_task_on_all_cpus() 164 fill_bp_slots(test, &idx, -1, current, 0); in test_two_tasks_on_all_cpus() 167 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_two_tasks_on_all_cpus() 169 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx)); in test_two_tasks_on_all_cpus() 181 fill_bp_slots(test, &idx, get_test_cpu(0), current, 0); in test_one_task_on_one_cpu() 182 TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx)); in test_one_task_on_one_cpu() 183 TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx)); in test_one_task_on_one_cpu() [all …]
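The KUnit test above exhausts hardware breakpoint slots by registering breakpoint events bound to one task, to one CPU, or system-wide. As a hedged, assumption-laden sketch (not part of the test itself), a single task-bound write watchpoint can be requested from userspace through perf_event_open():

```c
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int watched;	/* variable to place the write watchpoint on */

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_4;

	/* pid = 0: this task; cpu = -1: any CPU, mirroring the test's
	 * task-bound case. Slot exhaustion shows up as -ENOSPC. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* counted by the breakpoint event */
	close(fd);
	return 0;
}
```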
|
| /linux/tools/testing/selftests/cgroup/ |
| H A D | test_hugetlb_memcg.c | 99 long old_current, expected_current, current; in hugetlb_test_program() local 104 current = cg_read_long(test_group, "memory.current"); in hugetlb_test_program() 105 if (current - old_current >= MB(2)) { in hugetlb_test_program() 108 ksft_print_msg("before: %ld, after: %ld\n", old_current, current); in hugetlb_test_program() 117 current = cg_read_long(test_group, "memory.current"); in hugetlb_test_program() 118 if (current - old_current >= MB(2)) { in hugetlb_test_program() 120 ksft_print_msg("before: %ld, after: %ld\n", old_current, current); in hugetlb_test_program() 123 old_current = current; in hugetlb_test_program() 128 current = cg_read_long(test_group, "memory.current"); in hugetlb_test_program() 129 if (!values_close(expected_current, current, 5)) { in hugetlb_test_program() [all …]
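The hugetlb memcg selftest above repeatedly reads memory.current via cg_read_long() and compares it against the value recorded before faulting in hugetlb pages. A hypothetical stand-in for that helper, reading a cgroup v2 control file as a number (the cgroup path below is only an example), could look like:

```c
#include <stdio.h>
#include <stdlib.h>

/* Read "<cgroup>/<file>" (e.g. memory.current) as a long, -1 on error. */
static long cg_read_long_example(const char *cgroup, const char *file)
{
	char path[512];
	char buf[64];
	FILE *f;
	long val = -1;

	snprintf(path, sizeof(path), "%s/%s", cgroup, file);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fgets(buf, sizeof(buf), f))
		val = strtol(buf, NULL, 10);
	fclose(f);
	return val;
}

int main(void)
{
	/* The cgroup path is an assumption for the example. */
	long cur = cg_read_long_example("/sys/fs/cgroup/test", "memory.current");

	printf("memory.current = %ld bytes\n", cur);
	return 0;
}
```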
|
| /linux/arch/m68k/mm/ |
| H A D | fault.c | 29 signo = current->thread.signo; in send_fault_sig() 30 si_code = current->thread.code; in send_fault_sig() 31 addr = (void __user *)current->thread.faddr; in send_fault_sig() 73 struct mm_struct *mm = current->mm; in do_page_fault() 191 current->thread.signo = SIGBUS; in do_page_fault() 192 current->thread.faddr = address; in do_page_fault() 196 current->thread.signo = SIGBUS; in do_page_fault() 197 current->thread.code = BUS_ADRERR; in do_page_fault() 198 current->thread.faddr = address; in do_page_fault() 204 current->thread.signo = SIGSEGV; in do_page_fault() [all …]
|
| /linux/arch/mips/math-emu/ |
| H A D | dsemul.c | 77 mm_context_t *mm_ctx = &current->mm->context; in alloc_emuframe() 116 pr_debug("allocate emuframe %d to %d\n", idx, current->pid); in alloc_emuframe() 128 pr_debug("free emuframe %d from %d\n", idx, current->pid); in free_emuframe() 180 fr_idx = atomic_read(&current->thread.bd_emu_frame); in dsemul_thread_rollback() 193 regs->cp0_epc = current->thread.bd_emu_branch_pc; in dsemul_thread_rollback() 195 regs->cp0_epc = current->thread.bd_emu_cont_pc; in dsemul_thread_rollback() 197 atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE); in dsemul_thread_rollback() 198 free_emuframe(fr_idx, current->mm); in dsemul_thread_rollback() 246 fr_idx = atomic_read(&current->thread.bd_emu_frame); in mips_dsemul() 273 ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr), in mips_dsemul() [all …]
|
| /linux/rust/macros/ |
| H A D | fmt.rs | 52 let mut flush = |args: &mut TokenStream, current: &mut TokenStream| { in fmt() 53 let current = std::mem::take(current); in fmt() localVariable 54 if !current.is_empty() { in fmt() 56 let mut current = current.into_iter(); in fmt() localVariable 58 while let Some(tt) = current.next() { in fmt() 64 return (Some(acc), current.collect::<TokenStream>()); in fmt() 74 let mut current = TokenStream::new(); in fmt() localVariable 78 flush(&mut args, &mut current); in fmt() 81 _ => &mut current, in fmt() 85 flush(&mut args, &mut current); in fmt()
|
| /linux/include/linux/sched/ |
| H A D | signal.h | 289 struct task_struct *task = current; in kernel_dequeue_signal() 303 spin_lock_irq(&current->sighand->siglock); in kernel_signal_stop() 304 if (current->jobctl & JOBCTL_STOP_DEQUEUED) { in kernel_signal_stop() 305 current->jobctl |= JOBCTL_STOPPED; in kernel_signal_stop() 308 spin_unlock_irq(&current->sighand->siglock); in kernel_signal_stop() 377 set_tsk_thread_flag(current, TIF_SIGPENDING); in restart_syscall() 428 (fatal_signal_pending(current) || in fault_signal_pending() 429 (user_mode(regs) && signal_pending(current)))); in fault_signal_pending() 513 current->restore_sigmask = true; in set_restore_sigmask() 521 current->restore_sigmask = false; in clear_restore_sigmask() [all …]
|
| /linux/arch/um/kernel/ |
| H A D | process.c | 79 arch_switch_to(current); in __switch_to() 81 return current->thread.prev_sched; in __switch_to() 86 struct pt_regs *regs = &current->thread.regs; in interrupt_end() 103 return task_pid_nr(current); in get_current_pid() 115 if (current->thread.prev_sched != NULL) in new_thread_handler() 116 schedule_tail(current->thread.prev_sched); in new_thread_handler() 117 current->thread.prev_sched = NULL; in new_thread_handler() 119 fn = current->thread.request.thread.proc; in new_thread_handler() 120 arg = current->thread.request.thread.arg; in new_thread_handler() 126 userspace(&current->thread.regs.regs); in new_thread_handler() [all …]
|
| /linux/arch/csky/kernel/probes/ |
| H A D | uprobes.c | 50 struct uprobe_task *utask = current->utask; in arch_uprobe_pre_xol() 52 utask->autask.saved_trap_no = current->thread.trap_no; in arch_uprobe_pre_xol() 53 current->thread.trap_no = UPROBE_TRAP_NR; in arch_uprobe_pre_xol() 57 user_enable_single_step(current); in arch_uprobe_pre_xol() 64 struct uprobe_task *utask = current->utask; in arch_uprobe_post_xol() 66 WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); in arch_uprobe_post_xol() 67 current->thread.trap_no = utask->autask.saved_trap_no; in arch_uprobe_post_xol() 71 user_disable_single_step(current); in arch_uprobe_post_xol() 103 struct uprobe_task *utask = current->utask; in arch_uprobe_abort_xol() 105 current->thread.trap_no = utask->autask.saved_trap_no; in arch_uprobe_abort_xol() [all …]
|
| /linux/tools/iio/ |
| H A D | iio_utils.c | 31 char *current; in iioutils_break_up_name() local 43 current = strdup(full_name + strlen(prefix) + 1); in iioutils_break_up_name() 44 if (!current) in iioutils_break_up_name() 47 working = strtok(current, "_\0"); in iioutils_break_up_name() 49 free(current); in iioutils_break_up_name() 66 free(current); in iioutils_break_up_name() 321 struct iio_channel_info *current; in build_channel_array() local 385 current = &(*ci_array)[count++]; in build_channel_array() 421 current->scale = 1.0; in build_channel_array() 422 current->offset = 0; in build_channel_array() [all …]
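In the iio_utils.c hits above, iioutils_break_up_name() duplicates the channel name, tokenizes it on '_' and strips the index digits to build a generic name that is then used to look up shared attributes such as scale and offset. A simplified, standalone illustration of that idea (not the exact tool code) might be:

```c
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Derive a "generic" channel name such as "in_voltage_raw" from a
 * per-channel sysfs name such as "in_voltage0_raw" by dropping digits. */
static char *generic_channel_name(const char *full_name)
{
	char *copy = strdup(full_name);
	char *w, *r;

	if (!copy)
		return NULL;

	for (w = r = copy; *r; r++)
		if (!isdigit((unsigned char)*r))
			*w++ = *r;
	*w = '\0';

	return copy;	/* caller frees, as with the strdup() in the tool */
}

int main(void)
{
	char *name = generic_channel_name("in_voltage0_raw");

	if (name) {
		printf("%s\n", name);	/* prints "in_voltage_raw" */
		free(name);
	}
	return 0;
}
```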
|
| /linux/Documentation/hwmon/ |
| H A D | ina3221.rst | 21 The Texas Instruments INA3221 monitors voltage, current, and power on the high 23 and supply voltage, with programmable conversion times and averaging, current 35 curr[123]_crit Critical alert current(mA) setting, activates the 36 corresponding alarm when the respective current 38 curr[123]_crit_alarm Critical alert current limit exceeded 39 curr[123]_max Warning alert current(mA) setting, activates the 40 corresponding alarm when the respective current 42 curr[123]_max_alarm Warning alert current limit exceeded 46 curr4_input Sum of current(mA) measurement channels, 49 curr4_crit Critical alert current(mA) setting for sum of current [all …]
|
| H A D | acbel-fsg032.rst | 29 curr1_crit Critical maximum current. 30 curr1_crit_alarm Input current critical alarm. 31 curr1_input Measured output current. 33 curr1_max Maximum input current. 34 curr1_max_alarm Maximum input current high alarm. 35 curr1_rated_max Maximum rated input current. 36 curr2_crit Critical maximum current. 37 curr2_crit_alarm Output current critical alarm. 38 curr2_input Measured output current. 40 curr2_max Maximum output current. [all …]
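Both hwmon documents above follow the standard hwmon sysfs convention in which curr*_input and the related limit attributes report current in milliamperes. A small userspace sketch reading one such attribute (the hwmon instance number below is an assumption):

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical path; the actual hwmonN index varies per system. */
	const char *path = "/sys/class/hwmon/hwmon0/curr1_input";
	FILE *f = fopen(path, "r");
	long ma;

	if (!f || fscanf(f, "%ld", &ma) != 1) {
		if (f)
			fclose(f);
		fprintf(stderr, "cannot read %s\n", path);
		return 1;
	}
	fclose(f);

	/* curr*_input is in milliamperes per the hwmon sysfs ABI. */
	printf("measured current: %.3f A\n", ma / 1000.0);
	return 0;
}
```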
|
| /linux/arch/powerpc/include/asm/book3s/32/ |
| H A D | kup.h | 60 unsigned long kuap = current->thread.kuap; in __kuap_save_and_lock() 66 current->thread.kuap = KUAP_NONE; in __kuap_save_and_lock() 78 current->thread.kuap = KUAP_NONE; in __kuap_kernel_restore() 85 current->thread.kuap = regs->kuap; in __kuap_kernel_restore() 92 unsigned long kuap = current->thread.kuap; in __kuap_get_and_assert_locked() 108 current->thread.kuap = (__force u32)to; in allow_user_access() 114 u32 kuap = current->thread.kuap; in prevent_user_access() 121 current->thread.kuap = KUAP_NONE; in prevent_user_access() 127 unsigned long flags = current->thread.kuap; in prevent_user_access_return() 130 current->thread.kuap = KUAP_NONE; in prevent_user_access_return() [all …]
|
| /linux/include/linux/ |
| H A D | vtime.h | 52 vtime_account_kernel(current); in vtime_account_guest_enter() 53 current->flags |= PF_VCPU; in vtime_account_guest_enter() 58 vtime_account_kernel(current); in vtime_account_guest_exit() 59 current->flags &= ~PF_VCPU; in vtime_account_guest_exit() 96 vtime_guest_enter(current); in vtime_account_guest_enter() 98 current->flags |= PF_VCPU; in vtime_account_guest_enter() 104 vtime_guest_exit(current); in vtime_account_guest_exit() 106 current->flags &= ~PF_VCPU; in vtime_account_guest_exit() 116 current->flags |= PF_VCPU; in vtime_account_guest_enter() 121 current->flags &= ~PF_VCPU; in vtime_account_guest_exit()
|
| /linux/arch/loongarch/kernel/ |
| H A D | signal.c | 93 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0), in copy_fpu_to_sigcontext() 96 err |= __put_user(current->thread.fpu.fcc, fcc); in copy_fpu_to_sigcontext() 97 err |= __put_user(current->thread.fpu.fcsr, fcsr); in copy_fpu_to_sigcontext() 113 set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val); in copy_fpu_from_sigcontext() 115 err |= __get_user(current->thread.fpu.fcc, fcc); in copy_fpu_from_sigcontext() 116 err |= __get_user(current->thread.fpu.fcsr, fcsr); in copy_fpu_from_sigcontext() 130 err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0), in copy_lsx_to_sigcontext() 132 err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1), in copy_lsx_to_sigcontext() 135 err |= __put_user(current->thread.fpu.fcc, fcc); in copy_lsx_to_sigcontext() 136 err |= __put_user(current->thread.fpu.fcsr, fcsr); in copy_lsx_to_sigcontext() [all …]
|
| /linux/arch/arm64/kernel/ |
| H A D | signal.c | 254 &current->thread.uw.fpsimd_state; in preserve_fpsimd_context() 257 fpsimd_sync_from_effective_state(current); in preserve_fpsimd_context() 299 current->thread.svcr &= ~SVCR_SM_MASK; in restore_fpsimd_context() 300 current->thread.fp_type = FP_STATE_FPSIMD; in restore_fpsimd_context() 313 __put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err); in preserve_fpmr_context() 328 current->thread.uw.fpmr = fpmr; in restore_fpmr_context() 368 unsigned int vl = task_get_sve_vl(current); in preserve_sve_context() 371 if (thread_sm_enabled(&current->thread)) { in preserve_sve_context() 372 vl = task_get_sme_vl(current); in preserve_sve_context() 375 } else if (current->thread.fp_type == FP_STATE_SVE) { in preserve_sve_context() [all …]
|
| /linux/kernel/trace/ |
| H A D | fgraph.c | 350 int curr_ret_stack = current->curr_ret_stack; in fgraph_reserve_data() 359 val = get_fgraph_entry(current, curr_ret_stack - 1); in fgraph_reserve_data() 360 data = &current->ret_stack[curr_ret_stack]; in fgraph_reserve_data() 369 current->ret_stack[curr_ret_stack - 1] = val; in fgraph_reserve_data() 373 current->curr_ret_stack = curr_ret_stack; in fgraph_reserve_data() 375 current->ret_stack[curr_ret_stack - 1] = val; in fgraph_reserve_data() 413 return ret_stack_get_task_var(current, gops->idx); in fgraph_get_task_var() 467 int offset = current->curr_ret_stack; in fgraph_retrieve_parent_data() 476 ret_stack = get_ret_stack(current, offset, &next_offset); in fgraph_retrieve_parent_data() 487 val = get_fgraph_entry(current, offset); in fgraph_retrieve_parent_data() [all …]
|