| /linux/kernel/bpf/ |
| H A D | cpumask.c |
    7   #include <linux/cpumask.h>
    10  * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
    11  * @cpumask: The actual cpumask embedded in the struct.
    19  * the details in <linux/cpumask.h>. The consequence is that this structure is
    26  cpumask_t cpumask;
    40  * bpf_cpumask_create() - Create a mutable BPF cpumask.
    42  * Allocates a cpumask that can be queried, mutated, acquired, and released by
    43  * a BPF program. The cpumask returned by this function must either be embedded
    51  struct bpf_cpumask *cpumask; in bpf_cpumask_create()
    75  bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
    89  bpf_cpumask_release(struct bpf_cpumask *cpumask)
    97  bpf_cpumask_release_dtor(void *cpumask)
    110 bpf_cpumask_first(const struct cpumask *cpumask)
    123 bpf_cpumask_first_zero(const struct cpumask *cpumask)
    148 bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
    161 bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
    178 bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
    195 bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
    213 bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
    225 bpf_cpumask_setall(struct bpf_cpumask *cpumask)
    234 bpf_cpumask_clear(struct bpf_cpumask *cpumask)
    346 bpf_cpumask_empty(const struct cpumask *cpumask)
    361 bpf_cpumask_full(const struct cpumask *cpumask)
    388 bpf_cpumask_any_distribute(const struct cpumask *cpumask)
    418 bpf_cpumask_weight(const struct cpumask *cpumask)
    [all …] |
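The kfuncs indexed above follow a create/acquire/release discipline that the BPF verifier enforces. Below is a minimal sketch of a program exercising that lifecycle; the attach point and kfunc prototypes are assumptions modeled on the selftests in the next directory (cpumask_common.h), and the program name is illustrative:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* kfunc prototypes, as the selftests declare them */
    struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
    void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
    void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
    bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(cpumask_demo, struct task_struct *task, u64 clone_flags)
    {
        struct bpf_cpumask *mask;

        /* Allocation can fail, so the NULL check is mandatory. */
        mask = bpf_cpumask_create();
        if (!mask)
            return 0;

        bpf_cpumask_set_cpu(0, mask);
        /* A struct bpf_cpumask * may be read through const struct cpumask *. */
        if (bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
            bpf_printk("cpu 0 is set");

        /* The verifier requires the reference to be released (or stashed). */
        bpf_cpumask_release(mask);
        return 0;
    }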
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | cpumask_common.h |
    21 struct bpf_cpumask __kptr *cpumask;
    32 void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
    33 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak;
    34 u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak;
    35 u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak;
    36 u32 bpf_cpumask_first_and(const struct cpumask *src1,
    37                           const struct cpumask *src2) __ksym __weak;
    38 void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
    68 cast(struct bpf_cpumask *cpumask)
    75 struct bpf_cpumask *cpumask; in create_cpumask()
    [all …] |
| H A D | cpumask_success.c |
    141 struct bpf_cpumask *cpumask; in BPF_PROG()
    146 cpumask = create_cpumask(); in BPF_PROG()
    147 if (!cpumask) in BPF_PROG()
    150 bpf_cpumask_release(cpumask); in BPF_PROG()
    157 struct bpf_cpumask *cpumask; in BPF_PROG()
    162 cpumask = create_cpumask(); in BPF_PROG()
    163 if (!cpumask) in BPF_PROG()
    166 bpf_cpumask_set_cpu(0, cpumask); in BPF_PROG()
    167 if (!bpf_cpumask_test_cpu(0, cast(cpumask))) { in BPF_PROG()
    172 bpf_cpumask_clear_cpu(0, cpumask); in BPF_PROG()
    186 struct bpf_cpumask *cpumask; in BPF_PROG()
    215 struct bpf_cpumask *cpumask; in BPF_PROG()
    286 struct bpf_cpumask *cpumask; in BPF_PROG()
    461 struct bpf_cpumask *cpumask; in BPF_PROG()
    476 struct bpf_cpumask *cpumask; in BPF_PROG()
    [all …] |
| H A D | cpumask_failure.c |
    38 struct bpf_cpumask *cpumask; in BPF_PROG()
    40 cpumask = create_cpumask(); in BPF_PROG()
    41 __sink(cpumask); in BPF_PROG()
    43 /* cpumask is never released. */ in BPF_PROG()
    51 struct bpf_cpumask *cpumask; in BPF_PROG()
    53 cpumask = create_cpumask(); in BPF_PROG()
    55 /* cpumask is released twice. */ in BPF_PROG()
    56 bpf_cpumask_release(cpumask); in BPF_PROG()
    57 bpf_cpumask_release(cpumask); in BPF_PROG()
    66 struct bpf_cpumask *cpumask; in BPF_PROG()
    89 struct bpf_cpumask *cpumask; in BPF_PROG()
    [all …] |
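These failure tests exercise the verifier's reference tracking. A sketch of the leak case follows, assuming the same includes and kfunc prototypes as the earlier example; the selftest this mirrors expects the load to fail with an "Unreleased reference" error:

    /* Intentionally broken: the created mask is never released, so the
     * verifier rejects the program at load time. */
    SEC("tp_btf/task_newtask")
    int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
    {
        struct bpf_cpumask *mask;

        mask = bpf_cpumask_create();
        if (!mask)
            return 0;

        /* BUG (deliberate): missing bpf_cpumask_release(mask). */
        return 0;
    }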
| H A D | nested_trust_common.h |
    9  bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
    10 __u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
|
| /linux/include/linux/ |
| H A D | cpumask.h |
    117 extern struct cpumask __cpu_possible_mask;
    118 extern struct cpumask __cpu_online_mask;
    119 extern struct cpumask __cpu_enabled_mask;
    120 extern struct cpumask __cpu_present_mask;
    121 extern struct cpumask __cpu_active_mask;
    122 extern struct cpumask __cpu_dying_mask;
    123 #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
    124 #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
    125 #define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
    126 #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
    [all …] |
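The masks above are the kernel's global CPU state bitmaps, always read through the const-qualified macros. A short sketch of the usual iteration pattern; the helper name is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Walk the global online mask and report counts. */
    static void report_cpus(void)
    {
        unsigned int cpu;

        for_each_cpu(cpu, cpu_online_mask)
            pr_info("cpu%u: online\n", cpu);

        pr_info("%u online / %u present / %u possible\n",
                num_online_cpus(), num_present_cpus(), num_possible_cpus());
    }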
|
| H A D | stop_machine.h |
    38  void stop_machine_yield(const struct cpumask *cpumask);
    130 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    142 int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    162 const struct cpumask *cpus);
    166 const struct cpumask *cpus) in stop_machine_cpuslocked()
    177 stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
    184 const struct cpumask *cpus) in stop_machine_from_inactive_cpu()
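A sketch of calling stop_machine() with the prototype indexed above; the callback and caller names are hypothetical. Passing a NULL cpumask runs the callback on one CPU while every other online CPU spins with interrupts disabled:

    #include <linux/stop_machine.h>

    /* Runs on the stopper thread with preemption and IRQs off. */
    static int do_quiesced_update(void *data)
    {
        /* ... perform an update no other CPU may observe mid-way ... */
        return 0;
    }

    static int quiesced_update(void *data)
    {
        return stop_machine(do_quiesced_update, data, NULL);
    }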
|
| H A D | nmi.h |
    25  extern struct cpumask watchdog_cpumask;
    170 static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
    197 static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
    224 void nmi_backtrace_stall_snap(const struct cpumask *btp);
    225 void nmi_backtrace_stall_check(const struct cpumask *btp);
    227 static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
    228 static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
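A small hedged sketch of the backtrace trigger indexed above; the wrapper is hypothetical. The bool return distinguishes the stub variant on architectures without NMI backtrace support:

    #include <linux/nmi.h>
    #include <linux/printk.h>

    /* Ask the CPUs in mask to dump a backtrace, if the arch supports it. */
    static void debug_backtrace(struct cpumask *mask)
    {
        if (!trigger_cpumask_backtrace(mask))
            pr_warn("cpumask backtrace unsupported on this arch\n");
    }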
|
| H A D | arch_topology.h |
    32 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
    49 void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
    50 void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
    59 void topology_update_hw_pressure(const struct cpumask *cpus,
    90 const struct cpumask *cpu_coregroup_mask(int cpu);
    91 const struct cpumask *cpu_clustergroup_mask(int cpu);
|
| /linux/Documentation/bpf/ |
| H A D | cpumasks.rst |
    6  BPF cpumask kfuncs
    12 ``struct cpumask`` is a bitmap data structure in the kernel whose indices
    21 2. BPF cpumask objects
    29 ``struct bpf_cpumask *`` is a cpumask that is allocated by BPF, on behalf of a
    32 to a ``struct cpumask *``.
    40 .. kernel-doc:: kernel/bpf/cpumask.c
    43 .. kernel-doc:: kernel/bpf/cpumask.c
    46 .. kernel-doc:: kernel/bpf/cpumask.c
    54 struct bpf_cpumask __kptr * cpumask;
    71 local.cpumask = NULL;
    [all …] |
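The ``__kptr`` field at line 54 is the documented way to stash a bpf_cpumask in a map so it outlives one program invocation. A sketch following that pattern, assuming the same BPF includes as the earlier example; the map and helper names are illustrative:

    /* Map value holding a bpf_cpumask kptr, as in the .rst example. */
    struct cpumask_value {
        struct bpf_cpumask __kptr *cpumask;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __type(key, u32);
        __type(value, struct cpumask_value);
        __uint(max_entries, 1);
    } cpumask_map SEC(".maps");

    /* Transfer ownership of a created mask into the map. */
    static int stash_cpumask(struct bpf_cpumask *mask)
    {
        struct cpumask_value *v;
        struct bpf_cpumask *old;
        u32 key = 0;

        v = bpf_map_lookup_elem(&cpumask_map, &key);
        if (!v) {
            bpf_cpumask_release(mask);
            return -1;
        }

        /* bpf_kptr_xchg() moves our reference into the map and hands
         * back whatever was stored before, which we must release. */
        old = bpf_kptr_xchg(&v->cpumask, mask);
        if (old)
            bpf_cpumask_release(old);
        return 0;
    }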
|
| /linux/tools/sched_ext/include/scx/ |
| H A D | common.bpf.h |
    63  s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
    84  const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
    85  const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
    86  void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
    87  const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
    88  const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
    89  const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
    90  const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
    91  void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
    354 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
    [all …] |
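The scx_bpf_get_*/scx_bpf_put_* pairs above hand out RCU-like references to kernel-owned masks, so every get must be balanced with a put. A sketch from inside a BPF scheduler; the helper name is hypothetical:

    /* Pick the first idle CPU, honoring the get/put pairing. */
    static s32 first_idle_cpu(void)
    {
        const struct cpumask *idle;
        s32 cpu;

        idle = scx_bpf_get_idle_cpumask();
        cpu = bpf_cpumask_first(idle);
        /* Every scx_bpf_get_*() mask must be returned with a put. */
        scx_bpf_put_idle_cpumask(idle);

        /* A value >= the number of CPUs means no CPU was idle. */
        return cpu;
    }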
|
| /linux/Documentation/translations/zh_CN/core-api/ |
| H A D | padata.rst |
    57  cpumask_var_t cpumask);
    60  The parallel cpumask describes which processors will be used to run jobs submitted to this instance in parallel; the serial cpumask
    61  defines which processors may serve as serialization callback processors. cpumask specifies the new cpumask to use.
    65  and serial_cpumask; either cpumask may be changed by echoing a bitmask into the file
    70  Reading one of these files shows the user-supplied cpumask, which may differ from the "usable" cpumask.
    72  Internally, padata maintains two pairs of cpumasks, the user-supplied cpumasks and the "usable" cpumasks (each pair consisting of one
    73  parallel and one serial cpumask). The user-supplied cpumasks default to all possible CPUs at instance allocation,
    76  it is legal to supply a cpumask containing offline CPUs. Once an offline CPU in the user-supplied cpumask comes online,
    104 is modifying the instance's CPU masks elsewhere, and when cb_cpu is not in the serial cpumask, or the parallel or serial cpumasks
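A sketch of updating an instance's parallel cpumask from kernel code, per the interface this document describes; the caller name and the chosen CPUs are illustrative:

    #include <linux/cpumask.h>
    #include <linux/padata.h>

    /* Restrict an instance's parallel work to CPUs 0 and 1. */
    static int limit_parallel_cpus(struct padata_instance *pinst)
    {
        cpumask_var_t mask;
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

        cpumask_set_cpu(0, mask);
        cpumask_set_cpu(1, mask);

        /* PADATA_CPU_SERIAL selects the other member of the pair. */
        ret = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

        free_cpumask_var(mask);
        return ret;
    }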
|
| /linux/rust/helpers/ |
| H A D | cpumask.c |
    5  void rust_helper_cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
    10 void rust_helper___cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
    15 void rust_helper_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
    20 void rust_helper___cpumask_clear_cpu(int cpu, struct cpumask *dstp)
    25 bool rust_helper_cpumask_test_cpu(int cpu, struct cpumask *srcp)
    30 void rust_helper_cpumask_setall(struct cpumask *dstp)
    35 bool rust_helper_cpumask_empty(struct cpumask *srcp)
    40 bool rust_helper_cpumask_full(struct cpumask *srcp)
    45 unsigned int rust_helper_cpumask_weight(struct cpumask *srcp)
    50 void rust_helper_cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
|
| /linux/kernel/ |
| H A D | padata.c |
    72  int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
    74  return cpumask_nth(cpu_index, pd->cpumask.pcpu); in padata_cpu_hash()
    196 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) { in padata_do_parallel()
    197 if (cpumask_empty(pd->cpumask.cbcpu)) in padata_do_parallel()
    201 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu); in padata_do_parallel()
    202 *cb_cpu = cpumask_nth(cpu_index, pd->cpumask.cbcpu); in padata_do_parallel()
    297 cpu = cpumask_first(pd->cpumask.pcpu); in padata_reorder()
    299 cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu); in padata_reorder()
    397 cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu); in padata_setup_cpumasks()
    515 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_init_squeues()
    [all …] |
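The padata_cpu_hash() lines above show a common idiom: reduce a sequence number modulo the mask's population count, then pick the Nth set CPU. Extracted as a standalone sketch (the helper name is hypothetical):

    #include <linux/cpumask.h>

    /* Map a sequence number onto the Nth set CPU of a mask. */
    static unsigned int seq_to_cpu(unsigned int seq_nr, const struct cpumask *mask)
    {
        unsigned int cpu_index = seq_nr % cpumask_weight(mask);

        return cpumask_nth(cpu_index, mask);
    }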
|
| H A D | stop_machine.c |
    173 const struct cpumask *active_cpus;
    195 notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
    206 const struct cpumask *cpumask; in multi_cpu_stop()
    217 cpumask = cpu_online_mask; in multi_cpu_stop()
    218 is_active = cpu == cpumask_first(cpumask); in multi_cpu_stop()
    220 cpumask = msdata->active_cpus; in multi_cpu_stop()
    221 is_active = cpumask_test_cpu(cpu, cpumask); in multi_cpu_stop()
    227 stop_machine_yield(cpumask); in multi_cpu_stop()
    392 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
    408 for_each_cpu(cpu, cpumask) { in queue_stop_cpus_work()
    [all …] |
|
| /linux/kernel/sched/ |
| H A D | ext_idle.c |
    80  struct cpumask *idle_cpus = idle_cpumask(node)->cpu; in scx_idle_test_and_clear_cpu()
    89  const struct cpumask *smt = cpu_smt_mask(cpu); in scx_idle_test_and_clear_cpu()
    90  struct cpumask *idle_smts = idle_cpumask(node)->smt; in scx_idle_test_and_clear_cpu()
    115 static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
    150 static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
    191 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
    200 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
    246 static struct cpumask *llc_span(s32 cpu)
    280 static struct cpumask *numa_span(s32 cpu)
    452 const struct cpumask *cpus_allowed, u64 flags) in scx_select_cpu_dfl()
    [all …] |
|
| /linux/arch/arc/kernel/ |
| H A D | smp.c |
    42  static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
    51  if (cpulist_parse(buf, cpumask)) in arc_get_cpu_map()
    63  struct cpumask cpumask; in arc_init_cpu_possible()
    65  if (arc_get_cpu_map("possible-cpus", &cpumask)) { in arc_init_cpu_possible()
    69  cpumask_setall(&cpumask); in arc_init_cpu_possible()
    72  if (!cpumask_test_cpu(0, &cpumask)) in arc_init_cpu_possible()
    75  init_cpu_possible(&cpumask); in arc_init_cpu_possible()
    283 static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
    298 struct cpumask targets; in smp_send_stop()
    309 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
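A sketch of the boot-time pattern in arc_init_cpu_possible() above: parse a "0-3,5"-style CPU list, fall back to all CPUs on error, and keep the boot CPU possible. The function name and fallback message are illustrative:

    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    static void __init demo_init_possible(const char *list)
    {
        struct cpumask mask;

        if (cpulist_parse(list, &mask)) {
            pr_err("bad CPU list '%s', assuming all CPUs\n", list);
            cpumask_setall(&mask);
        }

        /* The boot CPU must remain possible. */
        if (!cpumask_test_cpu(0, &mask))
            cpumask_set_cpu(0, &mask);

        init_cpu_possible(&mask);
    }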
|
| /linux/include/trace/events/ |
| H A D | ipi.h |
    34 TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
    36 TP_ARGS(cpumask, callsite, callback),
    39 __cpumask(cpumask)
    45 __assign_cpumask(cpumask, cpumask_bits(cpumask));
    51 __get_cpumask(cpumask), __entry->callsite, __entry->callback)
    66 TP_PROTO(const struct cpumask *mask, const char *reason),
|
| /linux/arch/x86/kernel/apic/ |
| H A D | x2apic_cluster.c |
    22  static DEFINE_PER_CPU_READ_MOSTLY(struct cpumask *, cluster_masks);
    39  __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
    42  struct cpumask *tmpmsk; in __x2apic_send_IPI_mask()
    58  struct cpumask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
    75  static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
    81  x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
    93  struct cpumask *cmsk = this_cpu_read(cluster_masks); in init_x2apic_ldr()
    105 static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster)
    110 struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i); in prefill_clustermask()
    126 struct cpumask *cmsk = NULL; in alloc_clustermask()
    [all …] |
|
| /linux/lib/ |
| H A D | group_cpus.c |
    14  static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
    17  const struct cpumask *siblmsk; in grp_spread_init_one()
    86  const struct cpumask *mask, nodemask_t *nodemsk) in get_nodes_in_cpumask()
    132 const struct cpumask *cpu_mask, in alloc_nodes_groups()
    134 struct cpumask *nmsk, in alloc_nodes_groups()
    251 const struct cpumask *cpu_mask, in __group_cpus_evenly()
    252 struct cpumask *nmsk, struct cpumask *masks) in __group_cpus_evenly()
    349 struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
    355 struct cpumask *masks = NULL; in group_cpus_evenly()
    429 struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
    [all …] |
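A hedged sketch of calling group_cpus_evenly(), matching the two-argument prototype indexed above (used for, e.g., spreading IRQ vectors); the caller owns and frees the returned array, and the function name here is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/group_cpus.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    static void demo_group_cpus(unsigned int numgrps)
    {
        unsigned int nr, i;
        struct cpumask *masks = group_cpus_evenly(numgrps, &nr);

        if (!masks)
            return;

        for (i = 0; i < nr; i++)
            pr_info("group %u: %*pb\n", i, cpumask_pr_args(&masks[i]));

        kfree(masks);
    }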
|
| /linux/drivers/md/ |
| H A D | dm-ps-io-affinity.c |
    16  cpumask_var_t cpumask;
    36  free_cpumask_var(pi->cpumask); in ioa_free_path()
    66  if (!zalloc_cpumask_var(&pi->cpumask, GFP_KERNEL)) { in ioa_add_path()
    72  ret = cpumask_parse(argv[0], pi->cpumask); in ioa_add_path()
    79  for_each_cpu(cpu, pi->cpumask) { in ioa_add_path()
    105 free_cpumask_var(pi->cpumask); in ioa_add_path()
    171 DMEMIT("%*pb ", cpumask_pr_args(pi->cpumask)); in ioa_status()
    201 const struct cpumask *cpumask; in ioa_select_path()
    219 cpumask = cpumask_of_node(node); in ioa_select_path()
    220 for_each_cpu(i, cpumask) { in ioa_select_path()
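The ioa_add_path() lines above show the standard cpumask_var_t discipline: zero-allocate, parse a hex mask string, iterate, free. A standalone sketch with a hypothetical helper name:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    /* Parse a hex bitmask string, e.g. "f" for CPUs 0-3. */
    static int demo_parse_hex_mask(const char *arg)
    {
        cpumask_var_t mask;
        unsigned int cpu;
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

        ret = cpumask_parse(arg, mask);
        if (ret)
            goto out;

        for_each_cpu(cpu, mask)
            pr_info("cpu%u selected\n", cpu);
    out:
        free_cpumask_var(mask);
        return ret;
    }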
|
| /linux/drivers/cpuidle/ |
| H A D | dt_idle_states.c |
    97  const cpumask_t *cpumask) in idle_state_valid()
    109 cpu = cpumask_first(cpumask) + 1; in idle_state_valid()
    110 for_each_cpu_from(cpu, cpumask) { in idle_state_valid()
    152 const cpumask_t *cpumask; in dt_init_idle_driver()
    163 cpumask = drv->cpumask ? : cpu_possible_mask; in dt_init_idle_driver()
    164 cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); in dt_init_idle_driver()
    182 if (!idle_state_valid(state_node, i, cpumask)) { in dt_init_idle_driver()
|
| /linux/arch/powerpc/include/asm/ |
| H A D | smp.h |
    114 static inline struct cpumask *cpu_sibling_mask(int cpu)
    119 static inline struct cpumask *cpu_core_mask(int cpu)
    124 static inline struct cpumask *cpu_l2_cache_mask(int cpu)
    129 static inline struct cpumask *cpu_smallcore_mask(int cpu)
    142 static inline const struct cpumask *cpu_smt_mask(int cpu)
    194 static inline const struct cpumask *cpu_sibling_mask(int cpu)
    199 static inline const struct cpumask *cpu_smallcore_mask(int cpu)
    204 static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
    256 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
|
| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | affinity.h |
    28 struct cpumask mask;
    29 struct cpumask used;
    64 struct cpumask general_intr_mask;
    65 struct cpumask comp_vect_mask;
    71 struct cpumask real_cpu_mask;
|
| /linux/kernel/time/ |
| H A D | tick-common.c |
    186 const struct cpumask *cpumask) in tick_setup_device()
    242 if (!cpumask_equal(newdev->cpumask, cpumask)) in tick_setup_device()
    243 irq_set_affinity(newdev->irq, cpumask); in tick_setup_device()
    275 if (!cpumask_test_cpu(cpu, newdev->cpumask)) in tick_check_percpu()
    277 if (cpumask_equal(newdev->cpumask, cpumask_of(cpu))) in tick_check_percpu()
    283 if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) in tick_check_percpu()
    305 !cpumask_equal(curdev->cpumask, newdev->cpumask); in tick_check_preferred()
|