/linux/lib/cpumask_kunit.c
  13   "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
  19   int mask_weight = cpumask_weight(m); \
  33   weight = cpumask_weight(&mask_tmp); \
  42   int mask_weight = cpumask_weight(m); \
  67   KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));   in test_cpumask_weight()
  68   KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),   in test_cpumask_weight()
  70   KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));   in test_cpumask_weight()

/linux/lib/group_cpus.c
  148  ncpus = cpumask_weight(nmsk);   in alloc_nodes_groups()
  298  ncpus = cpumask_weight(nmsk);   in __group_cpus_evenly()
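A minimal KUnit-style sketch of the kind of weight assertion cpumask_kunit.c makes above: an empty mask weighs 0 and a fully set mask weighs nr_cpu_ids. The test and suite names here are illustrative, not the file's actual ones.

#include <kunit/test.h>
#include <linux/cpumask.h>

/* Illustrative only: an empty mask weighs 0, a fully set mask weighs nr_cpu_ids. */
static void example_cpumask_weight_test(struct kunit *test)
{
        static cpumask_t mask;

        cpumask_clear(&mask);
        KUNIT_EXPECT_EQ(test, 0, cpumask_weight(&mask));

        cpumask_setall(&mask);
        KUNIT_EXPECT_EQ(test, nr_cpu_ids, cpumask_weight(&mask));
}

static struct kunit_case example_cpumask_cases[] = {
        KUNIT_CASE(example_cpumask_weight_test),
        {}
};

static struct kunit_suite example_cpumask_suite = {
        .name = "example_cpumask_weight",
        .test_cases = example_cpumask_cases,
};
kunit_test_suite(example_cpumask_suite);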
/linux/drivers/infiniband/hw/hfi1/affinity.c
  105   possible = cpumask_weight(&node_affinity.real_cpu_mask);   in init_real_cpu_mask()
  106   ht = cpumask_weight(topology_sibling_cpumask(   in init_real_cpu_mask()
  137   cpumask_weight(topology_sibling_cpumask(   in node_affinity_init()
  509   if (cpumask_weight(&entry->comp_vect_mask) == 1) {   in _dev_comp_vect_cpu_mask_init()
  515   cpumask_weight(&entry->comp_vect_mask) /   in _dev_comp_vect_cpu_mask_init()
  524   cpumask_weight(&entry->comp_vect_mask) %   in _dev_comp_vect_cpu_mask_init()
  628   possible = cpumask_weight(&entry->def_intr.mask);   in hfi1_dev_affinity_init()
  974   possible = cpumask_weight(hw_thread_mask);   in find_hw_thread_mask()
  1019  } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {   in hfi1_get_proc_affinity()
/linux/arch/x86/include/asm/trace/hyperv.h
  21   TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
  67   TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
/linux/drivers/thermal/intel/intel_hfi.c
  209  cpu_count = cpumask_weight(hfi_instance->cpus);   in update_capabilities()
  485  if (cpumask_weight(hfi_instance->cpus) == 1 && hfi_clients_nr > 0) {   in intel_hfi_online()
  529  if (!cpumask_weight(hfi_instance->cpus))   in intel_hfi_offline()
/linux/kernel/irq/ipi.c
  40   nr_irqs = cpumask_weight(dest);   in irq_reserve_ipi()
  144  nr_irqs = cpumask_weight(dest);   in irq_destroy_ipi()

/linux/kernel/irq/affinity.c
  123  set_vecs = cpumask_weight(cpu_possible_mask);   in irq_calc_affinity_vectors()
/linux/arch/mips/kernel/crash.c
  75   while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {   in crash_kexec_prepare_cpus()
/linux/arch/x86/kernel/smpboot.c
  592   threads = cpumask_weight(topology_sibling_cpumask(cpu));   in set_cpu_sibling_map()
  1166  int threads = cpumask_weight(topology_sibling_cpumask(cpu));   in recompute_smt_state()
  1184  if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)   in remove_siblinginfo()
  1193  if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1)   in remove_siblinginfo()

/linux/arch/x86/kernel/tsc_sync.c
  341   return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;   in loop_timeout()
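The smpboot.c hits above use the weight of the sibling mask to decide whether a core has more than one hardware thread. A minimal sketch of that check, with a made-up helper name:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative helper (not from the tree): a sibling mask holding more than
 * one CPU means SMT is active on the core that owns @cpu. */
static bool cpu_core_has_smt(unsigned int cpu)
{
        return cpumask_weight(topology_sibling_cpumask(cpu)) > 1;
}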
/linux/arch/x86/platform/uv/uv_nmi.c
  629  k = n - cpumask_weight(uv_nmi_cpu_mask);   in uv_nmi_wait_cpus()
  687  cpumask_weight(uv_nmi_cpu_mask),   in uv_nmi_wait()
  697  cpumask_weight(uv_nmi_cpu_mask),   in uv_nmi_wait()
/linux/kernel/sched/topology.c
  173   if (cpumask_weight(sched_domain_span(sd)) == 1)   in sd_degenerate()
  697   size = cpumask_weight(sched_domain_span(sd));   in update_top_cache_domain()
  1010  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);   in init_overlap_sched_group()
  1237  sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));   in get_group()
  1307  sg->group_weight = cpumask_weight(sched_group_span(sg));   in init_sched_groups_capacity()
  1604  sd_weight = cpumask_weight(tl->mask(cpu));   in sd_init()
  2050  if (cpumask_weight(cpumask_of_node(node)) != 1)   in sched_update_numa()
  2439  sd->span_weight = cpumask_weight(sched_domain_span(sd));   in build_sched_domains()
/linux/drivers/thermal/cpufreq_cooling.c
  287  num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);   in cpufreq_state2power()
  374  unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);   in allocate_idle_time()
/linux/drivers/infiniband/sw/siw/siw_main.c
  171  num_cpus = cpumask_weight(tx_cpumask);   in siw_get_tx_cpu()
  175  num_cpus = cpumask_weight(tx_cpumask);   in siw_get_tx_cpu()
/linux/include/linux/topology.h
  39   #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
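topology.h defines nr_cpus_node() directly as the weight of a node's CPU mask. A small illustrative sketch (not code from the tree) of reporting the per-node CPU count with it:

#include <linux/nodemask.h>
#include <linux/printk.h>
#include <linux/topology.h>

/* Illustrative only: print how many CPUs each online NUMA node carries. */
static void report_cpus_per_node(void)
{
        int node;

        for_each_online_node(node)
                pr_info("node %d: %u CPUs\n", node, nr_cpus_node(node));
}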
/linux/arch/riscv/kernel/unaligned_access_speed.c
  134  if (cpumask_weight(mask) == weight)   in modify_unaligned_access_branches()
/linux/drivers/base/cacheinfo.c
  902  return cpumask_weight(*map);   in cpu_map_shared_cache()
  911  return cpumask_weight(*map);   in cpu_map_shared_cache()
  943  nr_shared = cpumask_weight(&llc->shared_cpu_map);   in update_per_cpu_data_slice_size_cpu()
/linux/arch/x86/kernel/cpu/microcode/core.c
  448  unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);   in load_primary()
  581  nr_offl = cpumask_weight(&cpu_offline_mask);   in load_late_stop_cpus()
/linux/kernel/stop_machine.c
  428  cpu_stop_init_done(&done, cpumask_weight(cpumask));   in __stop_cpus()
  642  .num_threads = cpumask_weight(smt_mask),   in stop_core_cpuslocked()
/linux/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
  59   if (cpumask_weight(&af_desc->mask) > 1)   in irq_pool_request_irq()
/linux/arch/x86/kernel/cpu/proc.c
  25   cpumask_weight(topology_core_cpumask(cpu)));   in show_cpuinfo_core()
/linux/drivers/net/wireguard/queueing.h
  111  cpu_index = id % cpumask_weight(cpu_online_mask);   in wg_cpumask_choose_online()
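The queueing.h hit above is part of wg_cpumask_choose_online(), which maps an arbitrary id onto one of the currently online CPUs. A simplified sketch of the pattern, omitting the caching the real helper does and ignoring CPU hotplug races:

#include <linux/cpumask.h>

/* Illustrative only: pick the (id % nr_online)-th online CPU by walking the
 * online mask; the real helper also caches its result for the caller. */
static unsigned int choose_online_cpu(unsigned int id)
{
        unsigned int index = id % cpumask_weight(cpu_online_mask);
        unsigned int cpu = cpumask_first(cpu_online_mask);

        while (index--)
                cpu = cpumask_next(cpu, cpu_online_mask);

        return cpu;
}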
/linux/arch/x86/hyperv/hv_apic.c
  167  weight = cpumask_weight(mask);   in __send_ipi_mask()
/linux/drivers/cpufreq/qcom-cpufreq-hw.c
  124  for (i = 1; i < cpumask_weight(policy->related_cpus); i++)   in qcom_cpufreq_hw_target_index()
  195  for (i = 1; i < cpumask_weight(policy->related_cpus); i++)   in qcom_cpufreq_hw_fast_switch()
/linux/kernel/bpf/cpumask.c
  422  return cpumask_weight(cpumask);   in bpf_cpumask_weight()
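bpf_cpumask_weight() in kernel/bpf/cpumask.c is the kfunc that exposes cpumask_weight() to BPF programs. A rough sketch of how a program might call it; the section, tracepoint, and kptr handling are illustrative assumptions, not taken from the kernel's selftests:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfunc declarations; signatures assumed to match kernel/bpf/cpumask.c. */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

char LICENSE[] SEC("license") = "GPL";

/* Illustrative program: allocate a cpumask, set one bit, read back its weight. */
SEC("tp_btf/task_newtask")
int BPF_PROG(count_set_cpus, struct task_struct *task, u64 clone_flags)
{
        struct bpf_cpumask *mask = bpf_cpumask_create();
        u32 weight;

        if (!mask)
                return 0;

        bpf_cpumask_set_cpu(0, mask);
        weight = bpf_cpumask_weight((const struct cpumask *)mask);
        bpf_printk("cpumask weight: %u", weight);

        bpf_cpumask_release(mask);
        return 0;
}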