
Searched refs: task_cpu (results 1 – 25 of 30, sorted by relevance)

/linux/Documentation/translations/zh_CN/scheduler/
sched-capacity.rst 302 task_util(p) < capacity(task_cpu(p))
358 then the task might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``; CPU capacity
374 task_uclamp_min(p) <= capacity(task_cpu(cpu))
387 task_bandwidth(p) < capacity(task_cpu(p))
/linux/kernel/sched/
stop_task.c 15 return task_cpu(p); /* stop tasks as never migrate */ in select_task_rq_stop()
core.c 342 int cpu = task_cpu(p); in sched_core_next()
2167 return cpu_curr(task_cpu(p)) == p; in task_curr()
2511 WARN_ON_ONCE(task_cpu(p) != new_cpu); in move_queued_task()
2605 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2638 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2652 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, in migration_cpu_stop()
2950 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) || in affine_move_task()
3141 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3363 if (task_cpu(p) != new_cpu) { in set_task_cpu()
3424 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
[all …]
deadline.c 451 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
456 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
1769 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1778 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
2501 int cpu = task_cpu(task); in find_later_rq()
2647 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2874 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
3234 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow()
psi.c 900 task->pid, task->comm, task_cpu(task), in psi_flags_change()
911 int cpu = task_cpu(task); in psi_task_change()
930 int cpu = task_cpu(prev); in psi_task_switch()
1008 int cpu = task_cpu(curr); in psi_account_irqtime()
idle.c 438 return task_cpu(p); /* IDLE tasks as never migrated */ in select_task_rq_idle()
cpudeadline.c 138 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
cpuacct.c 338 unsigned int cpu = task_cpu(tsk); in cpuacct_charge()
fair.c 2493 .src_cpu = task_cpu(p), in task_numa_migrate()
3627 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
7138 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7161 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7914 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
7916 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
7987 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
10533 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
13245 set_task_rq(p, task_cpu(p)); in task_change_group_fair()
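
The cpu_util() hits above encode a "what would this CPU's utilization be if the task moved" adjustment: the task's contribution is removed from the CPU it currently sits on and added to the candidate destination. A minimal userspace sketch of that bookkeeping, with a hypothetical helper name and utilization values passed in directly:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    /*
     * Hypothetical helper: estimate @cpu's utilization as if the task,
     * currently on @task_cpu and contributing @task_util, were running
     * on @dst_cpu instead.
     */
    static unsigned long cpu_util_if_migrated(unsigned long util,
                                              unsigned long task_util,
                                              int cpu, int task_cpu,
                                              int dst_cpu)
    {
        if (task_cpu == cpu && dst_cpu != cpu)
            util -= min_ul(util, task_util);  /* task would leave @cpu */
        else if (task_cpu != cpu && dst_cpu == cpu)
            util += task_util;                /* task would arrive on @cpu */
        return util;
    }

    int main(void)
    {
        /* Task (util 300) runs on CPU 0 (util 700); simulate a move to CPU 1. */
        printf("cpu0 after move: %lu\n", cpu_util_if_migrated(700, 300, 0, 0, 1));
        printf("cpu1 after move: %lu\n", cpu_util_if_migrated(200, 300, 1, 0, 1));
        return 0;
    }

The subtraction is clamped so a stale task contribution cannot drive the estimate below zero, in the defensive style the kernel code uses for the same adjustment.
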
rt.c 1762 int cpu = task_cpu(task); in find_lowest_rq()
1858 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
sched.h 1327 #define task_rq(p) cpu_rq(task_cpu(p))
2398 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
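
The stop_task.c and idle.c hits above are the simplest implementations of the ->select_task_rq() hook declared here: scheduling classes whose tasks never migrate just answer with the task's current CPU. A minimal sketch of that pattern, with task_struct reduced to a hypothetical one-field stub:

    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's struct task_struct. */
    struct task_struct {
        unsigned int cpu;   /* CPU this task last ran on */
    };

    /* Models task_cpu(): report the CPU recorded for the task. */
    static unsigned int task_cpu(const struct task_struct *p)
    {
        return p->cpu;
    }

    /*
     * Models select_task_rq_stop()/select_task_rq_idle(): a class whose
     * tasks never migrate keeps the task where it already is. The real
     * hook also receives the previous CPU and wake flags; both are
     * irrelevant for this trivial policy.
     */
    static int select_task_rq_pinned(struct task_struct *p, int prev_cpu, int flags)
    {
        (void)prev_cpu;
        (void)flags;
        return task_cpu(p);
    }

    int main(void)
    {
        struct task_struct p = { .cpu = 3 };

        printf("selected CPU: %d\n", select_task_rq_pinned(&p, 3, 0));
        return 0;
    }
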
debug.c 789 if (task_cpu(p) != rq_cpu) in print_rq()
ext.c 1239 return sch->global_dsqs[cpu_to_node(task_cpu(p))]; in find_global_dsq()
2618 WARN_ON_ONCE(task_cpu(p) == cpu); in task_can_run_on_remote_rq()
2635 p->comm, p->pid, task_cpu(p), cpu); in task_can_run_on_remote_rq()
7414 return task_cpu(p); in scx_bpf_task_cpu()
/linux/kernel/rcu/
tasks.h 1006 cpu = task_cpu(t); in rcu_tasks_is_holdout()
1116 cpu = task_cpu(t); in check_holdout_task()
1669 int cpu = task_cpu(t); in trc_inspect_reader()
1690 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); in trc_inspect_reader()
1749 cpu = task_cpu(t); in trc_wait_for_one_reader()
1884 if (task_curr(t) && cpu_online(task_cpu(t))) in trc_check_slow_task()
1903 cpu = task_cpu(t); in show_stalled_task_trace()
tree_stall.h 467 cpu = task_cpu(rcuc); in rcu_is_rcuc_kthread_starving()
576 cpu = gpk ? task_cpu(gpk) : -1; in rcu_check_gp_kthread_starvation()
619 cpu = task_cpu(gpk); in rcu_check_gp_kthread_expired_fqs_timer()
tree_nocb.h 1546 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, in show_rcu_nocb_gp_state()
1603 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1, in show_rcu_nocb_state()
/linux/kernel/trace/
trace_sched_wakeup.c 412 entry->next_cpu = task_cpu(next); in tracing_sched_switch_trace()
438 entry->next_cpu = task_cpu(wakee); in tracing_sched_wakeup_trace()
583 wakeup_cpu = task_cpu(p); in probe_wakeup()
/linux/include/linux/sched/
topology.h 250 return cpu_to_node(task_cpu(p)); in task_node()
/linux/Documentation/scheduler/
sched-capacity.rst 342 task_util(p) < capacity(task_cpu(p))
405 then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
424 task_uclamp_min(p) <= capacity(task_cpu(cpu))
438 task_bandwidth(p) < capacity(task_cpu(p))
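
All of the conditions quoted above compare a per-task statistic against capacity(task_cpu(p)). As a hedged illustration of the first one, the sketch below checks whether a task's utilization fits a CPU's capacity with roughly 20% headroom, mirroring the fits_capacity() margin used in kernel/sched/fair.c; the standalone function and the sample values are otherwise hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    /*
     * Mirrors the spirit of kernel/sched/fair.c's fits_capacity():
     * a utilization "fits" a CPU if it stays under capacity with
     * ~20% headroom (util * 1280 < capacity * 1024).
     */
    static bool fits_capacity(unsigned long util, unsigned long capacity)
    {
        return util * 1280 < capacity * 1024;
    }

    int main(void)
    {
        /* task_util(p) = 600 on a little CPU of capacity 512... */
        printf("600 on 512:  %s\n", fits_capacity(600, 512) ? "fits" : "CPU-bound");
        /* ...versus a big CPU of capacity SCHED_CAPACITY_SCALE. */
        printf("600 on 1024: %s\n",
               fits_capacity(600, SCHED_CAPACITY_SCALE) ? "fits" : "CPU-bound");
        return 0;
    }
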
/linux/include/linux/
sched.h 2219 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2228 static inline unsigned int task_cpu(const struct task_struct *p) in task_cpu() function
2276 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
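
The two sched.h hits above are the SMP and !SMP definitions of task_cpu() itself. Below is a rough userspace model of that split, assuming the usual layout: under SMP the kernel returns the CPU number cached with the task (roughly READ_ONCE(task_thread_info(p)->cpu)), while a uniprocessor build can only ever answer 0:

    #include <stdio.h>

    #define CONFIG_SMP 1    /* set to 0 to model a uniprocessor (!SMP) build */

    /* Hypothetical stand-in for the kernel's struct task_struct. */
    struct task_struct {
        unsigned int cpu;   /* CPU number cached for this task */
    };

    #if CONFIG_SMP
    /* SMP: return the CPU the scheduler last recorded for the task. */
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
        return p->cpu;
    }
    #else
    /* !SMP: there is only one CPU, so the answer is always 0. */
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
        (void)p;
        return 0;
    }
    #endif

    int main(void)
    {
        struct task_struct p = { .cpu = 2 };

        printf("task_cpu(p) = %u\n", task_cpu(&p));
        return 0;
    }
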
/linux/include/trace/events/
sched.h 158 __entry->target_cpu = task_cpu(p);
290 __entry->orig_cpu = task_cpu(p);
/linux/kernel/
stop_machine.c 58 struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task)); in print_stop_info()
/linux/arch/parisc/kernel/
traps.c 153 level, task_cpu(current), cr30, cr31); in show_regs()
/linux/fs/proc/
array.c 642 seq_put_decimal_ll(m, " ", task_cpu(task)); in do_task_stat()
/linux/fs/resctrl/
rdtgroup.c 613 smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); in update_task_closid_rmid()
2812 cpumask_set_cpu(task_cpu(t), mask); in rdt_move_group_tasks()
