
Searched refs:prev_cpu (Results 1 – 25 of 31) sorted by relevance

/linux/kernel/sched/
ext_idle.c
451 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, in scx_select_cpu_dfl() argument
456 int node = scx_cpu_node_if_enabled(prev_cpu); in scx_select_cpu_dfl()
466 is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed); in scx_select_cpu_dfl()
498 const struct cpumask *cpus = numa_span(prev_cpu); in scx_select_cpu_dfl()
508 const struct cpumask *cpus = llc_span(prev_cpu); in scx_select_cpu_dfl()
527 if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) && in scx_select_cpu_dfl()
528 scx_idle_test_and_clear_cpu(prev_cpu)) { in scx_select_cpu_dfl()
529 cpu = prev_cpu; in scx_select_cpu_dfl()
565 cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) && in scx_select_cpu_dfl()
566 scx_idle_test_and_clear_cpu(prev_cpu)) { in scx_select_cpu_dfl()
[all …]
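
The hits above trace scx_select_cpu_dfl()'s fast path: reuse prev_cpu when the task may still run there, it shares a cache with the current candidate CPU, and its idle flag can be claimed. A minimal userspace sketch of that decision follows; the three kernel predicates are replaced by illustrative stubs, so the topology and idle states here are assumptions, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Stubs for the kernel helpers: CPU 3 is outside the allowed mask,
 * even-numbered CPUs are idle, 4-CPU clusters share a cache. */
static bool cpu_allowed_stub(int cpu)             { return cpu != 3; }
static bool cpus_share_cache_stub(int a, int b)   { return a / 4 == b / 4; }
static bool test_and_clear_cpu_idle_stub(int cpu) { return cpu % 2 == 0; }

/* The prev_cpu fast path: reuse it only when allowed, cache-hot
 * relative to the current candidate, and claimably idle. */
static int try_prev_cpu(int candidate_cpu, int prev_cpu)
{
	bool is_prev_allowed = cpu_allowed_stub(prev_cpu);

	if (is_prev_allowed &&
	    cpus_share_cache_stub(candidate_cpu, prev_cpu) &&
	    test_and_clear_cpu_idle_stub(prev_cpu))
		return prev_cpu;  /* keep the task where its data is warm */

	return -1;  /* caller continues with the wider idle search */
}

int main(void)
{
	printf("fast path: %d\n", try_prev_cpu(1, 2));  /* 2: reused */
	printf("fast path: %d\n", try_prev_cpu(1, 5));  /* -1: different cache */
	return 0;
}
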
fair.c
1051 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
7283 wake_affine_idle(int this_cpu, int prev_cpu, int sync) in wake_affine_idle() argument
7297 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) in wake_affine_idle()
7298 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; in wake_affine_idle()
7307 if (available_idle_cpu(prev_cpu)) in wake_affine_idle()
7308 return prev_cpu; in wake_affine_idle()
7315 int this_cpu, int prev_cpu, int sync) in wake_affine_weight() argument
7336 this_eff_load *= capacity_of(prev_cpu); in wake_affine_weight()
7338 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); in wake_affine_weight()
7357 int this_cpu, int prev_cpu, int sync) in wake_affine() argument
[all …]
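
wake_affine_idle() encodes a preference order: if the waking CPU is idle and cache-affine with prev_cpu, a still-idle prev_cpu wins because it is cache-hot; otherwise prev_cpu is used only if idle. A hedged userspace sketch of that ordering, with stubs standing in for available_idle_cpu() and cpus_share_cache(), and the kernel's WF_SYNC shortcut omitted:

#include <stdbool.h>
#include <stdio.h>

/* Stubs: CPUs 2 and 3 are idle; 4-CPU clusters share a cache. */
static bool available_idle_cpu_stub(int cpu)    { return cpu == 2 || cpu == 3; }
static bool cpus_share_cache_stub(int a, int b) { return a / 4 == b / 4; }

/* Preference order of wake_affine_idle(); -1 stands in for the
 * kernel's "no preference" return. */
static int wake_affine_idle_sketch(int this_cpu, int prev_cpu)
{
	if (available_idle_cpu_stub(this_cpu) &&
	    cpus_share_cache_stub(this_cpu, prev_cpu))
		return available_idle_cpu_stub(prev_cpu) ? prev_cpu : this_cpu;

	if (available_idle_cpu_stub(prev_cpu))
		return prev_cpu;

	return -1;
}

int main(void)
{
	/* Both idle and cache-affine: the cache-hot prev_cpu wins. */
	printf("pick: %d\n", wake_affine_idle_sketch(2, 3));  /* 3 */
	/* Neither CPU idle: no affine preference. */
	printf("pick: %d\n", wake_affine_idle_sketch(0, 1));  /* -1 */
	return 0;
}
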
/linux/arch/x86/include/asm/trace/
irq_vectors.h
155 unsigned int prev_cpu),
157 TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
164 __field( unsigned int, prev_cpu )
172 __entry->prev_cpu = prev_cpu;
178 __entry->prev_vector, __entry->prev_cpu)
185 unsigned int prev_cpu), \
186 TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL); \
/linux/drivers/irqchip/
irq-loongarch-avec.c
54 unsigned int prev_cpu; member
92 if (cpu_online(adata->prev_cpu)) { in avecintc_sync()
93 plist = per_cpu_ptr(&pending_list, adata->prev_cpu); in avecintc_sync()
96 mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR); in avecintc_sync()
175 cpu = adata->prev_cpu; in complete_irq_moving()
201 adata->prev_cpu = adata->cpu; in complete_irq_moving()
265 adata->prev_cpu = adata->cpu = cpu; in avecintc_alloc_vector()
309 per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL; in avecintc_free_vector()
310 irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false); in avecintc_free_vector()
/linux/tools/testing/selftests/sched_ext/
select_cpu_dispatch.bpf.c
16 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
19 s32 cpu = prev_cpu; in BPF_STRUCT_OPS()
29 cpu = prev_cpu; in BPF_STRUCT_OPS()
enq_select_cpu_fails.bpf.c
select_cpu_dfl_nodispatch.bpf.c
31 s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
35 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
46 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, in BPF_STRUCT_OPS()
select_cpu_dispatch_bad_dsq.bpf.c
18 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
23 return prev_cpu; in BPF_STRUCT_OPS()
select_cpu_dispatch_dbl_dsp.bpf.c
18 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
24 return prev_cpu; in BPF_STRUCT_OPS()
ddsp_bogus_dsq_fail.bpf.c
14 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
28 return prev_cpu; in BPF_STRUCT_OPS()
ddsp_vtimelocal_fail.bpf.c
14 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
25 return prev_cpu; in BPF_STRUCT_OPS()
dsp_local_on.bpf.c
20 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
22 return prev_cpu; in BPF_STRUCT_OPS()
exit.bpf.c
21 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
28 return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &found); in BPF_STRUCT_OPS()
select_cpu_vtime.bpf.c
39 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
47 cpu = prev_cpu; in BPF_STRUCT_OPS()
maximal.bpf.c
17 s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu, in BPF_STRUCT_OPS() argument
20 return prev_cpu; in BPF_STRUCT_OPS()
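
All of these selftests implement the same ops.select_cpu callback shape: take prev_cpu, optionally consult the default policy, and return a CPU. A minimal sketch in the style of exit.bpf.c above, assuming the scx common BPF headers the selftests build against:

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;

	/* Ask the default policy (scx_select_cpu_dfl() from ext_idle.c
	 * above) for a CPU; is_idle reports whether the pick was idle,
	 * in which case a real scheduler could dispatch directly here. */
	return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
}
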
/linux/arch/powerpc/lib/
qspinlock.c
261 static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu) in get_tail_qnode() argument
263 struct qnodes *qnodesp = per_cpu_ptr(&qnodes, prev_cpu); in get_tail_qnode()
377 static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu,… in yield_to_prev() argument
392 if (node->sleepy || vcpu_is_preempted(prev_cpu)) { in yield_to_prev()
419 yield_count = yield_count_of(prev_cpu); in yield_to_prev()
431 yield_to_preempted(prev_cpu, yield_count); in yield_to_prev()
576 int prev_cpu = decode_tail_cpu(old); in queued_spin_lock_mcs_queue() local
577 struct qnode *prev = get_tail_qnode(lock, prev_cpu); in queued_spin_lock_mcs_queue()
587 if (yield_to_prev(lock, node, prev_cpu, paravirt)) in queued_spin_lock_mcs_queue()
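
Here prev_cpu is decoded from the MCS tail word so a queued waiter can tell whether the previous waiter's vCPU was preempted and yield to it rather than spin. A small userspace sketch of just that predicate, with vcpu_is_preempted() stubbed:

#include <stdbool.h>
#include <stdio.h>

/* Stub for the kernel's vcpu_is_preempted(): pretend vCPU 2's host
 * context has been scheduled out. */
static bool vcpu_is_preempted_stub(int cpu) { return cpu == 2; }

/* Core test of yield_to_prev(): spinning on the MCS predecessor is
 * wasted work when its vCPU is not actually running, so direct the
 * cycles to prev_cpu instead (the queue tail encodes that CPU). */
static bool should_yield_to_prev(int prev_cpu, bool node_sleepy)
{
	return node_sleepy || vcpu_is_preempted_stub(prev_cpu);
}

int main(void)
{
	printf("yield to 2: %d\n", should_yield_to_prev(2, false)); /* 1 */
	printf("yield to 1: %d\n", should_yield_to_prev(1, false)); /* 0 */
	return 0;
}
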
/linux/tools/sched_ext/include/scx/
compat.bpf.h
235 s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
252 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags, in scx_bpf_select_cpu_and() argument
257 .prev_cpu = prev_cpu, in scx_bpf_select_cpu_and()
264 return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags, in scx_bpf_select_cpu_and()
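
compat.bpf.h shows the usual scx compat-shim pattern: declare the newer kfunc variant weak, probe it at load time, and fall back on older kernels. A generic sketch of the pattern with a hypothetical kfunc; my_kfunc___compat is not a real symbol, while bpf_ksym_exists() is libbpf's standard probe:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Hypothetical newer kfunc variant, declared weak so the program
 * still loads on kernels that lack the symbol. */
s32 my_kfunc___compat(struct task_struct *p, s32 prev_cpu) __ksym __weak;

/* Wrapper: probe the symbol at load time, fall back otherwise. */
static inline s32 my_kfunc(struct task_struct *p, s32 prev_cpu)
{
	if (bpf_ksym_exists(my_kfunc___compat))
		return my_kfunc___compat(p, prev_cpu);
	return prev_cpu;  /* older kernel: conservative fallback */
}
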
/linux/arch/sparc/kernel/
cpumap.c
193 int n, id, cpu, prev_cpu, last_cpu, level; in build_cpuinfo_tree() local
204 prev_cpu = cpu = cpumask_first(cpu_online_mask); in build_cpuinfo_tree()
268 (cpu == last_cpu) ? cpu : prev_cpu; in build_cpuinfo_tree()
290 prev_cpu = cpu; in build_cpuinfo_tree()
/linux/Documentation/translations/zh_CN/scheduler/
sched-energy.rst
118 whether keeping it on the previously active prev_cpu can still save energy.
132 prev_cpu = 0 (it last ran on CPU0).
197 **Case 3. P stays on prev_cpu/CPU0**::
/linux/arch/x86/kernel/apic/
vector.c
31 unsigned int prev_cpu; member
181 apicd->prev_cpu = apicd->cpu; in chip_data_update()
363 apicd->prev_cpu); in clear_irq_vector()
374 per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
375 apic_free_vector(apicd->prev_cpu, vector, managed); in clear_irq_vector()
654 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); in x86_vector_debug_show()
900 unsigned int cpu = apicd->prev_cpu; in free_moved_vector()
940 if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu)) in apic_force_complete_move()
1094 unsigned int cpu = apicd->prev_cpu; in __vector_schedule_cleanup()
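
vector.c keeps both the current and the previous (cpu, vector) placement, so a moved interrupt's old slot can be freed only once the move completes. A userspace sketch of that two-slot bookkeeping; the struct and function names here are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative stand-in for struct apic_chip_data's placement fields. */
struct placement {
	unsigned int vector, cpu;
	unsigned int prev_vector, prev_cpu;
};

/* On retarget, park the old slot in the prev_* fields (as
 * chip_data_update() does above) instead of freeing it immediately:
 * the device may still raise the old vector until the move lands. */
static void retarget(struct placement *d, unsigned int vec, unsigned int cpu)
{
	d->prev_vector = d->vector;
	d->prev_cpu = d->cpu;
	d->vector = vec;
	d->cpu = cpu;
}

/* Once the move completes, release the parked slot, as
 * free_moved_vector() does above. */
static void cleanup_moved(struct placement *d)
{
	printf("free vector %u on cpu %u\n", d->prev_vector, d->prev_cpu);
	d->prev_vector = 0;
}

int main(void)
{
	struct placement d = { .vector = 33, .cpu = 0 };

	retarget(&d, 34, 2);  /* old (33, CPU0) parked in prev_* */
	cleanup_moved(&d);    /* prints: free vector 33 on cpu 0 */
	return 0;
}
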
/linux/tools/sched_ext/
scx_qmap.bpf.c
136 static s32 pick_direct_dispatch_cpu(struct task_struct *p, s32 prev_cpu) in pick_direct_dispatch_cpu() argument
141 scx_bpf_test_and_clear_cpu_idle(prev_cpu)) in pick_direct_dispatch_cpu()
142 return prev_cpu; in pick_direct_dispatch_cpu()
163 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
171 cpu = pick_direct_dispatch_cpu(p, prev_cpu); in BPF_STRUCT_OPS()
177 return prev_cpu; in BPF_STRUCT_OPS()
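
pick_direct_dispatch_cpu() reuses prev_cpu only if its idle flag can be atomically claimed, so two concurrent wakeups cannot both target the same idle CPU. A hedged BPF fragment of that test, again assuming the scx common headers:

#include <scx/common.bpf.h>

/* Claim prev_cpu's idle flag atomically; only on success is a direct
 * dispatch to prev_cpu safe, otherwise fall back to the normal pick. */
static s32 try_claim_prev_cpu(s32 prev_cpu)
{
	if (scx_bpf_test_and_clear_cpu_idle(prev_cpu))
		return prev_cpu;

	return -1;
}
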
scx_cpu0.bpf.c
45 s32 BPF_STRUCT_OPS(cpu0_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
scx_central.bpf.c
91 s32 prev_cpu, u64 wake_flags) in BPF_STRUCT_OPS() argument
/linux/arch/riscv/include/asm/
switch_to.h
99 bool thread_migrated = smp_processor_id() != task->thread.prev_cpu; in switch_to_should_flush_icache()
108 #define __set_prev_cpu(thread) ((thread).prev_cpu = smp_processor_id())
processor.h
123 unsigned int prev_cpu; member
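
On RISC-V, thread.prev_cpu records where the task last ran; a mismatch at switch-in means the task migrated and its icache may be stale. A userspace sketch of the check, with smp_processor_id() stubbed:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel pieces: the current CPU and the task's
 * recorded thread.prev_cpu field. */
static unsigned int smp_processor_id_stub(void) { return 1; }

struct thread_sketch { unsigned int prev_cpu; };

/* Mirrors the migration test in switch_to_should_flush_icache(). */
static bool thread_migrated(const struct thread_sketch *t)
{
	return smp_processor_id_stub() != t->prev_cpu;
}

/* Mirrors __set_prev_cpu(): record where the task ran last. */
static void set_prev_cpu(struct thread_sketch *t)
{
	t->prev_cpu = smp_processor_id_stub();
}

int main(void)
{
	struct thread_sketch t = { .prev_cpu = 0 };

	printf("migrated: %d\n", thread_migrated(&t));  /* 1: CPU0 -> CPU1 */
	set_prev_cpu(&t);
	printf("migrated: %d\n", thread_migrated(&t));  /* 0 */
	return 0;
}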
