| /linux/arch/x86/include/asm/trace/ |
| H A D | hyperv.h | 21 TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); 67 TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
|
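The two hits above are TP_fast_assign() blocks that store the weight of the destination mask in the trace record. A minimal sketch of that pattern as a standalone trace header (the event, field and TRACE_SYSTEM names below are invented, not the hyperv ones):

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/cpumask.h>
#include <linux/tracepoint.h>

TRACE_EVENT(example_send_ipi_mask,
	TP_PROTO(const struct cpumask *cpus, int vector),
	TP_ARGS(cpus, vector),
	TP_STRUCT__entry(
		__field(unsigned int, ncpus)
		__field(int, vector)
	),
	TP_fast_assign(
		/* record only the CPU count, not the whole mask */
		__entry->ncpus = cpumask_weight(cpus);
		__entry->vector = vector;
	),
	TP_printk("ncpus %u vector 0x%x", __entry->ncpus, __entry->vector)
);

#endif /* _TRACE_EXAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>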
| /linux/drivers/infiniband/hw/hfi1/ |
| H A D | affinity.c | 103 possible = cpumask_weight(&node_affinity.real_cpu_mask); in init_real_cpu_mask() 104 ht = cpumask_weight(topology_sibling_cpumask( in init_real_cpu_mask() 128 cpumask_weight(topology_sibling_cpumask( in node_affinity_init() 500 if (cpumask_weight(&entry->comp_vect_mask) == 1) { in _dev_comp_vect_cpu_mask_init() 506 cpumask_weight(&entry->comp_vect_mask) / in _dev_comp_vect_cpu_mask_init() 515 cpumask_weight(&entry->comp_vect_mask) % in _dev_comp_vect_cpu_mask_init() 619 possible = cpumask_weight(&entry->def_intr.mask); in hfi1_dev_affinity_init() 1001 } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { in hfi1_get_proc_affinity()
|
| /linux/lib/ |
| H A D | group_cpus.c | 246 ncpus = cpumask_weight(nmsk); in alloc_nodes_groups() 311 if (!cpumask_weight(cluster_mask)) in alloc_cluster_groups() 387 nc = cpumask_weight(nmsk); in __try_group_cluster_cpus() 451 ncpus = cpumask_weight(nmsk); in __group_cpus_evenly()
|
| /linux/kernel/irq/ |
| H A D | ipi.c | 40 nr_irqs = cpumask_weight(dest); in irq_reserve_ipi() 144 nr_irqs = cpumask_weight(dest); in irq_destroy_ipi()
|
| /linux/arch/mips/kernel/ |
| H A D | crash.c | 75 while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { in crash_kexec_prepare_cpus()
|
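The crash path above spins until every expected CPU has added itself to cpus_in_crash, bounded by a millisecond countdown. A minimal sketch of that wait loop (the function name and the 10-second budget are assumptions; cpus_in_crash and ncpus come from the hit above):

#include <linux/cpumask.h>
#include <linux/delay.h>

static struct cpumask cpus_in_crash;

/* Wait until ncpus CPUs have marked themselves, or roughly 10s elapse. */
static void example_wait_for_crash_cpus(unsigned int ncpus)
{
	int msecs = 10000;

	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0))
		mdelay(1);
}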
| /linux/rust/helpers/ |
| H A D | cpumask.c | 56 return cpumask_weight(srcp); in rust_helper_cpumask_weight()
|
| /linux/block/ |
| H A D | blk-mq-cpumap.c | 25 num = cpumask_weight(mask); in blk_mq_num_queues()
|
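The hit above sizes a queue count from the mask's weight. A hedged sketch of that clamp (the helper name and the min_not_zero() policy are illustrative, not necessarily the exact blk_mq_num_queues() body):

#include <linux/cpumask.h>
#include <linux/minmax.h>

/* Never request more queues than there are CPUs in @mask. */
static unsigned int example_num_queues(const struct cpumask *mask,
				       unsigned int max_queues)
{
	unsigned int num = cpumask_weight(mask);

	return min_not_zero(num, max_queues);
}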
| /linux/include/linux/ |
| H A D | cpumask.h | 868 static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp) in cpumask_weight() function 1225 #define num_enabled_cpus() cpumask_weight(cpu_enabled_mask) 1226 #define num_present_cpus() cpumask_weight(cpu_present_mask) 1227 #define num_active_cpus() cpumask_weight(cpu_active_mask)
|
| H A D | topology.h | 40 #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
|
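cpumask.h defines cpumask_weight() as a population count over the mask's bitmap, and the num_*_cpus() macros above plus topology.h's nr_cpus_node() are thin wrappers over it. A small usage sketch (the reporting function itself is invented):

#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/printk.h>
#include <linux/topology.h>

static void example_report_cpu_counts(void)
{
	int node;

	/* num_present_cpus()/num_active_cpus() expand to cpumask_weight(). */
	pr_info("present %u active %u\n",
		num_present_cpus(), num_active_cpus());

	/* nr_cpus_node() is cpumask_weight(cpumask_of_node(node)). */
	for_each_online_node(node)
		pr_info("node %d: %u CPUs\n", node, nr_cpus_node(node));
}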
| /linux/arch/x86/kernel/ |
| H A D | smpboot.c | 713 threads = cpumask_weight(topology_sibling_cpumask(cpu)); in set_cpu_sibling_map() 1286 int threads = cpumask_weight(topology_sibling_cpumask(cpu)); in recompute_smt_state() 1304 if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) in remove_siblinginfo() 1313 if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1) in remove_siblinginfo()
|
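The smpboot.c hits use the weight of a CPU's sibling mask to detect SMT: a weight of 1 means the CPU has no hyperthread siblings left in the map. A minimal sketch of that check (the helper name is made up):

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/types.h>

/* True if @cpu is the only CPU left in its SMT sibling mask. */
static bool example_cpu_is_last_sibling(unsigned int cpu)
{
	return cpumask_weight(topology_sibling_cpumask(cpu)) == 1;
}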
| /linux/arch/x86/platform/uv/ |
| H A D | uv_nmi.c | 629 k = n - cpumask_weight(uv_nmi_cpu_mask); in uv_nmi_wait_cpus() 687 cpumask_weight(uv_nmi_cpu_mask), in uv_nmi_wait() 697 cpumask_weight(uv_nmi_cpu_mask), in uv_nmi_wait()
|
| /linux/drivers/thermal/ |
| H A D | cpufreq_cooling.c | 285 num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus); in cpufreq_state2power() 372 unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus); in allocate_idle_time()
|
| /linux/drivers/infiniband/sw/siw/ |
| H A D | siw_main.c | 170 num_cpus = cpumask_weight(tx_cpumask); in siw_get_tx_cpu() 174 num_cpus = cpumask_weight(tx_cpumask); in siw_get_tx_cpu()
|
| /linux/lib/tests/ |
| H A D | test_ratelimit.c | 105 const int n_stress_kthread = cpumask_weight(cpu_online_mask); in test_ratelimit_stress()
|
| /linux/drivers/base/ |
| H A D | cacheinfo.c | 950 return cpumask_weight(*map); in cpu_map_shared_cache() 959 return cpumask_weight(*map); in cpu_map_shared_cache() 991 nr_shared = cpumask_weight(&llc->shared_cpu_map); in update_per_cpu_data_slice_size_cpu()
|
| /linux/kernel/ |
| H A D | stop_machine.c | 429 cpu_stop_init_done(&done, cpumask_weight(cpumask)); in __stop_cpus() 643 .num_threads = cpumask_weight(smt_mask), in stop_core_cpuslocked()
|
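__stop_cpus() above seeds a completion counter with the weight of the target mask so it knows how many stopper threads to wait for. A hedged sketch of that counting pattern (the struct and function names are illustrative, not the real cpu_stop_done API):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpumask.h>

struct example_stop_done {
	atomic_t		nr_todo;	/* CPUs still to report back */
	struct completion	completion;
};

static void example_stop_init_done(struct example_stop_done *done,
				   const struct cpumask *cpumask)
{
	/* One unit of work per CPU in the mask. */
	atomic_set(&done->nr_todo, cpumask_weight(cpumask));
	init_completion(&done->completion);
}

static void example_stop_signal_done(struct example_stop_done *done)
{
	/* Last CPU to finish wakes the waiter. */
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}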
| /linux/arch/loongarch/kernel/ |
| H A D | machine_kexec.c | 207 while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) { in crash_smp_send_stop()
|
| H A D | numa.c | 256 loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]); in init_numa_memory()
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| H A D | irq_affinity.c | 66 if (cpumask_weight(&af_desc->mask) > 1) in irq_pool_request_irq()
|
| /linux/arch/x86/hyperv/ |
| H A D | hv_apic.c | 177 weight = cpumask_weight(mask); in __send_ipi_mask()
|
| /linux/drivers/cpufreq/ |
| H A D | qcom-cpufreq-hw.c | 124 for (i = 1; i < cpumask_weight(policy->related_cpus); i++) in qcom_cpufreq_hw_target_index() 196 for (i = 1; i < cpumask_weight(policy->related_cpus); i++) in qcom_cpufreq_hw_fast_switch()
|
| /linux/drivers/firmware/psci/ |
| H A D | psci_checker.c | 93 if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) { in down_and_up_cpus()
|
| /linux/arch/riscv/kernel/ |
| H A D | unaligned_access_speed.c | 197 if (cpumask_weight(mask) == weight) in modify_unaligned_access_branches()
|
| /linux/arch/powerpc/kernel/ |
| H A D | watchdog.c | 495 if (cpumask_weight(&wd_cpus_enabled) == 1) { in start_watchdog()
|
| /linux/kernel/power/ |
| H A D | em_netlink.c | 33 nr_cpus = cpumask_weight(to_cpumask(pd->cpus)); in __em_nl_get_pd_size()
|