| /linux/samples/trace_events/ |
| H A D | trace-events-sample.c | 41 current->cpus_ptr, fmt, &va); in do_simple_thread_func()
|       |                       | 53 trace_foo_rel_loc("Hello __rel_loc", cnt, bitmask, current->cpus_ptr); in do_simple_thread_func()
|
| /linux/kernel/sched/ |
| H A D | ext_idle.c | 455 const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr; in scx_select_cpu_dfl()
|       |            | 471 if (allowed != p->cpus_ptr) { in scx_select_cpu_dfl()
|       |            | 476 } else if (cpumask_and(local_cpus, cpus_allowed, p->cpus_ptr)) { in scx_select_cpu_dfl()
|       |            | 500 if (allowed == p->cpus_ptr && task_affinity_all(p)) in scx_select_cpu_dfl()
|       |            | 510 if (allowed == p->cpus_ptr && task_affinity_all(p)) in scx_select_cpu_dfl()
|       |            | 929 if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) && in select_cpu_from_kfunc()
|       |            | 936 allowed ?: p->cpus_ptr, flags); in select_cpu_from_kfunc()
|
| H A D | fair.c | 2118 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
|       |        | 2150 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
|       |        | 2264 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) in task_numa_compare()
|       |        | 2463 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
|       |        | 7397 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in sched_balance_find_dst_group_cpu()
|       |        | 7444 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in sched_balance_find_dst_cpu()
|       |        | 7594 for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) { in select_idle_smt()
|       |        | 7644 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_cpu()
|       |        | 7716 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_capacity()
|       |        | 7837 cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && in select_idle_sibling()
|       |        | [all …]
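
Every fair.c hit above applies the same rule: a CPU is only a valid destination if it is also set in p->cpus_ptr, checked with cpumask_test_cpu()/cpumask_intersects() or by iterating with for_each_cpu_and(). A minimal kernel-style sketch of that pattern follows; the helper name is illustrative, not an in-tree function.

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Illustrative: return the first online CPU that is both a candidate and
     * allowed by the task's affinity mask, or -1 if the sets do not overlap. */
    static int first_allowed_online_cpu(struct task_struct *p,
                                        const struct cpumask *candidates)
    {
        int cpu;

        /* Walk only CPUs present in both masks. */
        for_each_cpu_and(cpu, candidates, p->cpus_ptr) {
            if (cpu_online(cpu))
                return cpu;
        }

        return -1;
    }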
|
| H A D | core.c | 2347 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
|       |        | 2571 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
|       |        | 2653 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
|       |        | 3326 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) in migrate_swap_stop()
|       |        | 3329 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) in migrate_swap_stop()
|       |        | 3364 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) in migrate_swap()
|       |        | 3367 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) in migrate_swap()
|       |        | 3447 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
|       |        | 3500 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
|       |        | 3843 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
|       |        | [all …]
|
| H A D | ext.c | 1678 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); in move_remote_task_to_local_dsq()
|       |       | 2631 p, (struct cpumask *)p->cpus_ptr); in set_cpus_allowed_scx()
|       |       | 3087 p, (struct cpumask *)p->cpus_ptr); in switching_to_scx()
|       |       | 3847 donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr); in bypass_lb_cpu()
|       |       | 4513 dump_line(s, " cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr), in scx_dump_task()
|
| H A D | syscalls.c | 630 if (!cpumask_subset(span, p->cpus_ptr) || in __sched_setscheduler()
|
| H A D | deadline.c | 658 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); in dl_task_offline_migration()
|       |            | 3132 if (!cpumask_intersects(p->cpus_ptr, hk_msk)) { in dl_get_task_effective_cpus()
|
| H A D | sched.h | 1470 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { in sched_group_cookie_match()
|       |         | 2742 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in task_allowed_on_cpu()
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | cpumask_failure.c | 69 cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr); in BPF_PROG()
|       |                   | 80 bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr); in BPF_PROG()
|
| H A D | nested_trust_success.c | 24 bpf_cpumask_test_cpu(0, task->cpus_ptr); in BPF_PROG()
|
| /linux/tools/testing/selftests/sched_ext/ |
| H A D | ddsp_bogus_dsq_fail.bpf.c | 16 s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); in BPF_STRUCT_OPS()
|
| H A D | select_cpu_dispatch.bpf.c | 24 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); in BPF_STRUCT_OPS()
|
| H A D | ddsp_vtimelocal_fail.bpf.c | 16 s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); in BPF_STRUCT_OPS()
|
| H A D | select_cpu_vtime.bpf.c | 43 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); in BPF_STRUCT_OPS()
|
| /linux/tools/sched_ext/ |
| H A D | scx_qmap.bpf.c | 144 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); in pick_direct_dispatch_cpu()
|       |                | 256 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); in BPF_STRUCT_OPS()
|       |                | 340 if (bpf_cpumask_test_cpu(this_cpu, p->cpus_ptr)) in dispatch_highpri()
|       |                | 343 cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0); in dispatch_highpri()
|
| H A D | scx_central.bpf.c | 154 if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { in dispatch_to_cpu()
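
The sched_ext selftests and example schedulers above all pass p->cpus_ptr straight to scx_bpf_pick_idle_cpu() so that only CPUs the task may run on are considered. Below is a minimal sketch of an ops.select_cpu() callback built around that call; the scheduler name and the prev_cpu fallback policy are illustrative, not taken from the files listed.

    #include <scx/common.bpf.h>

    char _license[] SEC("license") = "GPL";

    /* Illustrative ops.select_cpu(): search for an idle CPU, but only among
     * the CPUs in the task's affinity mask. */
    s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
                       s32 prev_cpu, u64 wake_flags)
    {
        s32 cpu;

        cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
        if (cpu >= 0)
            return cpu;

        /* Nothing idle and allowed: stay where the task last ran. */
        return prev_cpu;
    }

    SEC(".struct_ops.link")
    struct sched_ext_ops example_ops = {
        .select_cpu = (void *)example_select_cpu,
        .name       = "example",
    };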
|
| /linux/Documentation/bpf/ |
| H A D | cpumasks.rst | 105 if (!bpf_cpumask_full(task->cpus_ptr))
|       |              | 108 bpf_cpumask_copy(cpumask, task->cpus_ptr);
|       |              | 243 /* struct cpumask * pointers such as task->cpus_ptr can also be queried. */
|       |              | 244 if (bpf_cpumask_test_cpu(0, task->cpus_ptr))
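
The cpumasks.rst lines above state the contract that the cpumask_failure.c selftest listed earlier enforces: task->cpus_ptr is a read-only struct cpumask * that may be queried with the cpumask kfuncs, while any mutation must go through a bpf_cpumask the program allocates itself. A minimal sketch of that pattern, assuming a tp_btf/task_newtask attach point; the program name and printout are illustrative.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* cpumask kfuncs declared for this sketch. */
    struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
    void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
    void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
    void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
    bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(inspect_affinity, struct task_struct *task, u64 clone_flags)
    {
        struct bpf_cpumask *scratch;

        /* Read-only queries against task->cpus_ptr are allowed. */
        if (bpf_cpumask_test_cpu(0, task->cpus_ptr))
            bpf_printk("new task %d may run on CPU 0", task->pid);

        /* Mutation needs a private copy; writing task->cpus_ptr is rejected. */
        scratch = bpf_cpumask_create();
        if (!scratch)
            return 0;
        bpf_cpumask_copy(scratch, task->cpus_ptr);
        bpf_cpumask_set_cpu(0, scratch);
        bpf_cpumask_release(scratch);
        return 0;
    }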
|
| /linux/arch/mips/kernel/ |
| H A D | mips-mt-fpaff.c | 181 cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr); in mipsmt_sys_sched_getaffinity()
|
| /linux/include/linux/ |
| H A D | sched.h | 922 const cpumask_t *cpus_ptr; member
|       |         | 2379 if (unlikely(p->cpus_ptr != &p->cpus_mask)) in __migrate_enable()
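
The two sched.h hits above are the member itself and the check that __migrate_enable() (like migrate_disable_switch() and dup_task_struct() elsewhere in this list) performs: cpus_ptr normally points at the task's own cpus_mask and is temporarily redirected while migration is disabled, which is why affinity readers go through cpus_ptr. The helpers below are an illustrative sketch of those two reads, not in-tree functions.

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Illustrative: true while cpus_ptr has been redirected away from the
     * task's own cpus_mask, i.e. the task is pinned by migrate_disable(). */
    static bool task_is_migrate_pinned(struct task_struct *p)
    {
        return p->cpus_ptr != &p->cpus_mask;
    }

    /* Illustrative: affinity checks read cpus_ptr so they transparently see
     * both the user-set mask and any temporary migrate_disable() pin. */
    static bool task_may_run_on(struct task_struct *p, int cpu)
    {
        return cpumask_test_cpu(cpu, p->cpus_ptr);
    }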
|
| /linux/kernel/trace/ |
| H A D | trace_hwlat.c | 323 if (!cpumask_equal(current_mask, current->cpus_ptr)) in move_to_next_cpu()
|
| /linux/fs/resctrl/ |
| H A D | pseudo_lock.c | 1030 if (!cpumask_subset(current->cpus_ptr, &plr->d->hdr.cpu_mask)) { in pseudo_lock_dev_mmap_prepare()
|
| /linux/arch/powerpc/platforms/cell/spufs/ |
| H A D | sched.c | 131 cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); in __spu_update_sched_info()
|
| /linux/kernel/ |
| H A D | fork.c | 955 if (orig->cpus_ptr == &orig->cpus_mask) in dup_task_struct()
|       |        | 956 tsk->cpus_ptr = &tsk->cpus_mask; in dup_task_struct()
|
| /linux/kernel/cgroup/ |
| H A D | cpuset.c | 3841 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
|
| /linux/kernel/bpf/ |
| H A D | verifier.c | 7088 const cpumask_t *cpus_ptr; in BTF_TYPE_SAFE_RCU() local
|