Cross-reference matches for the kernel symbol nr_cpu_ids, the number of possible CPU
ids in the running kernel: on SMP builds it is a runtime variable trimmed down from
the compile-time ceiling NR_CPUS during boot, and on fixed-CPU-count builds it
collapses to a constant macro (see the include/linux/cpumask.h entry below).

/linux/kernel/
watchdog_buddy.c
     16  if (next_cpu >= nr_cpu_ids)          in watchdog_next_cpu()
     20  return nr_cpu_ids;                   in watchdog_next_cpu()
     51  if (next_cpu < nr_cpu_ids)           in watchdog_hardlockup_enable()
     75  if (next_cpu < nr_cpu_ids)           in watchdog_hardlockup_disable()
    102  if (next_cpu >= nr_cpu_ids)          in watchdog_buddy_check_hardlockup()
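The watchdog_buddy.c hits show the usual wrap-around search: take the next online CPU
after the current one, wrap to the first on overflow, and treat any value >= nr_cpu_ids
as "no CPU". A minimal userspace model of that pattern, assuming a toy 8-bit online
mask and hypothetical next_online()/NR_CPU_IDS stand-ins rather than the kernel's
cpumask API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPU_IDS 8U                /* stand-in for the kernel's nr_cpu_ids */

static uint32_t online_mask = 0x16;  /* toy data: CPUs 1, 2 and 4 online */

/* Next set bit at or above 'start'; NR_CPU_IDS means "none found". */
static unsigned int next_online(unsigned int start)
{
    for (unsigned int cpu = start; cpu < NR_CPU_IDS; cpu++)
        if (online_mask & (1U << cpu))
            return cpu;
    return NR_CPU_IDS;
}

/* Same shape as watchdog_next_cpu(): the next online CPU after 'cpu',
 * wrapping around, or NR_CPU_IDS when 'cpu' is the only online CPU. */
static unsigned int watchdog_next_cpu(unsigned int cpu)
{
    unsigned int next = next_online(cpu + 1);

    if (next >= NR_CPU_IDS)
        next = next_online(0);
    if (next == cpu)
        return NR_CPU_IDS;
    return next;
}

int main(void)
{
    assert(watchdog_next_cpu(1) == 2);
    assert(watchdog_next_cpu(4) == 1);  /* wraps past the top of the mask */
    printf("buddy of CPU 2 is CPU %u\n", watchdog_next_cpu(2));
    return 0;
}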

/linux/lib/
cpumask_kunit.c
     44  for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
     68  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),  in test_cpumask_weight()
     70  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));  in test_cpumask_weight()
     75  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));  in test_cpumask_first()
     79  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),  in test_cpumask_first()
     87  KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),  in test_cpumask_last()
     94  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),  in test_cpumask_next()
     97  KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),  in test_cpumask_next()
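These KUnit expectations spell out the boundary contract: the weight of the possible
mask equals nr_cpu_ids, searches on an empty mask come back >= nr_cpu_ids, and the
last possible CPU is nr_cpu_ids - 1. A toy single-word model that checks the same
invariants (the mask layout and helper names here are illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>

#define NR_CPU_IDS 6U

/* Possible mask with all NR_CPU_IDS bits set, plus an empty mask. */
static const uint64_t mask_possible = (1ULL << NR_CPU_IDS) - 1;
static const uint64_t mask_empty = 0;

static unsigned int first_set(uint64_t m)
{
    for (unsigned int cpu = 0; cpu < NR_CPU_IDS; cpu++)
        if (m & (1ULL << cpu))
            return cpu;
    return NR_CPU_IDS;               /* "no CPU" sentinel */
}

static unsigned int weight(uint64_t m)
{
    unsigned int w = 0;

    for (unsigned int cpu = 0; cpu < NR_CPU_IDS; cpu++)
        w += !!(m & (1ULL << cpu));
    return w;
}

static unsigned int last_set(uint64_t m)
{
    for (unsigned int cpu = NR_CPU_IDS; cpu-- > 0;)
        if (m & (1ULL << cpu))
            return cpu;
    return NR_CPU_IDS;
}

int main(void)
{
    /* Same shape as the expectations in cpumask_kunit.c. */
    assert(weight(mask_possible) == NR_CPU_IDS);
    assert(first_set(mask_empty) >= NR_CPU_IDS);
    assert(last_set(mask_possible) == NR_CPU_IDS - 1);
    return 0;
}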

cpumask.c
     17  * Return: >= nr_cpu_ids on completion
    136  * enumeration is O(sched_domains_numa_levels * nr_cpu_ids), while
    138  * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
    149  WARN_ON(cpu >= nr_cpu_ids);          in cpumask_local_spread()
    164  * Return: >= nr_cpu_ids if the intersection is empty.
    176  if (next < nr_cpu_ids)               in cpumask_any_and_distribute()
    187  * Return: >= nr_cpu_ids if the intersection is empty.
    196  if (next < nr_cpu_ids)               in cpumask_any_distribute()
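cpumask_any_and_distribute() keeps a cursor from the previous pick so repeated callers
get spread across the intersection instead of always landing on the first set bit, and
it too returns >= nr_cpu_ids for an empty intersection. A sketch of that distribution
idea over a plain bitmask (state handling simplified; the kernel keeps its cursor in a
per-CPU variable, and no concurrency safety is implied here):

#include <stdint.h>
#include <stdio.h>

#define NR_CPU_IDS 8U

/* First set bit at or after 'start', wrapping around the mask. */
static unsigned int next_set_wrap(uint64_t m, unsigned int start)
{
    for (unsigned int i = 0; i < NR_CPU_IDS; i++) {
        unsigned int cpu = (start + i) % NR_CPU_IDS;

        if (m & (1ULL << cpu))
            return cpu;
    }
    return NR_CPU_IDS;               /* empty mask */
}

/* Round-robin pick from (a & b), remembering the previous pick so
 * successive calls distribute across the intersection. */
static unsigned int any_and_distribute(uint64_t a, uint64_t b)
{
    static unsigned int prev = NR_CPU_IDS;   /* cursor; per-CPU in the kernel */
    unsigned int next;

    next = next_set_wrap(a & b, prev + 1 >= NR_CPU_IDS ? 0 : prev + 1);
    if (next < NR_CPU_IDS)
        prev = next;
    return next;
}

int main(void)
{
    uint64_t a = 0xF0, b = 0xBC;     /* intersection: CPUs 4, 5 and 7 */

    for (int i = 0; i < 5; i++)      /* prints 4, 5, 7, 4, 5 */
        printf("pick %u\n", any_and_distribute(a, b));
    return 0;
}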

objpool.c
     53  for (i = 0; i < nr_cpu_ids; i++) {   in objpool_init_percpu_slots()
    112  for (i = 0; i < nr_cpu_ids; i++)     in objpool_fini_percpu_slots()
    145  slot_size = nr_cpu_ids * sizeof(struct objpool_slot);   in objpool_init()
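objpool sizes its slot array at nr_cpu_ids entries and walks the same bound to set the
slots up and tear them down; cpupri.c and dm-ps-io-affinity.c further down use the same
kcalloc(nr_cpu_ids, ...) sizing. A sketch of that one-slot-per-possible-CPU shape
(struct slot and the function names are invented for illustration):

#include <stdlib.h>

#define NR_CPU_IDS 8U                /* stand-in for nr_cpu_ids */

/* Hypothetical per-CPU ring slot; the real struct objpool_slot differs. */
struct slot {
    unsigned int head, tail;
};

static struct slot *slots;

/* One slot per possible CPU id, mirroring how objpool_init() computes
 * slot_size = nr_cpu_ids * sizeof(struct objpool_slot). */
static int pool_init(void)
{
    slots = calloc(NR_CPU_IDS, sizeof(*slots));
    return slots ? 0 : -1;
}

static void pool_fini(void)
{
    free(slots);
    slots = NULL;
}

int main(void)
{
    if (pool_init())
        return 1;
    pool_fini();
    return 0;
}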

/linux/tools/sched_ext/
scx_central.c
     60  skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();   in main()
     81  RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);   in main()
     82  RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);   in main()
     97  cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);   in main()
    103  skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);   in main()
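On the userspace side, scx_central sizes its per-CPU arrays and its cpu_set_t from
libbpf_num_possible_cpus(), which reports the kernel's possible-CPU count. A minimal
standalone sketch of that sizing (error handling trimmed; builds against libbpf with
cc demo.c -lbpf):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <bpf/libbpf.h>

int main(void)
{
    int nr_cpu_ids = libbpf_num_possible_cpus();

    if (nr_cpu_ids < 0) {
        fprintf(stderr, "cannot read possible CPU count\n");
        return 1;
    }

    /* Dynamically sized CPU set, as scx_central allocates for affinity. */
    cpu_set_t *cpuset = CPU_ALLOC(nr_cpu_ids);
    size_t setsize = CPU_ALLOC_SIZE(nr_cpu_ids);

    if (!cpuset)
        return 1;

    CPU_ZERO_S(setsize, cpuset);
    CPU_SET_S(0, setsize, cpuset);   /* e.g. pin something to CPU 0 */

    printf("possible CPU ids: %d\n", nr_cpu_ids);
    CPU_FREE(cpuset);
    return 0;
}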

/linux/arch/powerpc/kernel/
paca.c
     62  size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);   in alloc_shared_lppaca()
    245  paca_nr_cpu_ids = nr_cpu_ids;        in allocate_paca_ptrs()
    247  paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;   in allocate_paca_ptrs()
    291  new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;   in free_unused_pacas()
    296  paca_nr_cpu_ids = nr_cpu_ids;        in free_unused_pacas()
    309  paca_ptrs_size + paca_struct_size, nr_cpu_ids);   in free_unused_pacas()
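alloc_shared_lppaca() rounds the total footprint (nr_cpu_ids * LPPACA_SIZE) up to a
page boundary. The rounding is the standard power-of-two align-up; a small
self-contained check (the PAGE_SIZE and LPPACA_SIZE values here are illustrative):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    size_t nr_cpu_ids = 6, lppaca_size = 1024;   /* toy values */
    size_t total = PAGE_ALIGN(nr_cpu_ids * lppaca_size);

    /* 6 KiB rounds up to 8 KiB: two full pages. */
    assert(total == 2 * PAGE_SIZE);
    return 0;
}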

/linux/arch/x86/kernel/cpu/
topology.c
    288  if (apic_id != topo_info.boot_cpu_apic_id && topo_info.nr_assigned_cpus >= nr_cpu_ids) {   in topology_register_apic()
    289  pr_warn_once("CPU limit of %d reached. Ignoring further CPUs\n", nr_cpu_ids);   in topology_register_apic()
    429  unsigned int possible = nr_cpu_ids;  in topology_apply_cmdline_limits_early()
    438  if (possible < nr_cpu_ids) {         in topology_apply_cmdline_limits_early()
    480  if (WARN_ON_ONCE(assigned > nr_cpu_ids)) {   in topology_init_possible_cpus()
    481  disabled += assigned - nr_cpu_ids;   in topology_init_possible_cpus()
    482  assigned = nr_cpu_ids;               in topology_init_possible_cpus()
    484  allowed = min_t(unsigned int, total, nr_cpu_ids);   in topology_init_possible_cpus()

/linux/arch/arm/mach-spear/
platsmp.c
    102  if (ncores > nr_cpu_ids) {           in spear13xx_smp_init_cpus()
    104  ncores, nr_cpu_ids);                 in spear13xx_smp_init_cpus()
    105  ncores = nr_cpu_ids;                 in spear13xx_smp_init_cpus()
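This spear13xx hit, like the identical bcm63xx_smp.c and omap-smp.c hits further down,
is the standard SMP bring-up clamp: if the hardware reports more cores than this kernel
build can represent, warn and cap the count at nr_cpu_ids. A compact model of the clamp
(values and message text invented):

#include <stdio.h>

#define NR_CPU_IDS 4U                /* stand-in for nr_cpu_ids */

/* Clamp a hardware-reported core count to what this build supports. */
static unsigned int clamp_ncores(unsigned int ncores)
{
    if (ncores > NR_CPU_IDS) {
        fprintf(stderr,
                "SMP: %u cores greater than maximum (%u), clipping\n",
                ncores, NR_CPU_IDS);
        ncores = NR_CPU_IDS;
    }
    return ncores;
}

int main(void)
{
    printf("%u\n", clamp_ncores(8)); /* warns, then prints 4 */
    printf("%u\n", clamp_ncores(2)); /* prints 2 unchanged */
    return 0;
}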

/linux/include/linux/
cpumask.h
     25  #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
     28  #define nr_cpu_ids ((unsigned int)NR_CPUS)   (macro definition)
     30  extern unsigned int nr_cpu_ids;
     36  WARN_ON(nr != nr_cpu_ids);           in set_nr_cpu_ids()
     38  nr_cpu_ids = nr;                     in set_nr_cpu_ids()
     70  #define small_cpumask_bits nr_cpu_ids
     73  #define small_cpumask_bits nr_cpu_ids
     74  #define large_cpumask_bits nr_cpu_ids
     76  #define nr_cpumask_bits nr_cpu_ids
   1215  nr_cpu_ids);                         in cpumap_print_to_pagebuf()
   [all …]
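The cpumask.h hits show both halves of the definition: when the CPU count is fixed at
build time, nr_cpu_ids collapses to a constant macro; otherwise it is an extern set
once during boot via set_nr_cpu_ids(). A sketch of that dual-definition pattern in a
standalone toy (the NR_CPUS guard is simplified; the real header also keys the macro
form off CONFIG_FORCE_NR_CPUS):

#include <stdio.h>

#define NR_CPUS 4                    /* build-time ceiling, like the Kconfig value */

#if NR_CPUS == 1
/* Fixed at build time: the symbol is just a constant. */
#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
/* Runtime-trimmed: boot code lowers it from NR_CPUS to what exists. */
static unsigned int nr_cpu_ids = NR_CPUS;

static void set_nr_cpu_ids(unsigned int nr)
{
    nr_cpu_ids = nr;
}
#endif

int main(void)
{
#if NR_CPUS > 1
    set_nr_cpu_ids(2);               /* e.g. firmware reported two CPUs */
#endif
    /* Possible-CPU loops are bounded by nr_cpu_ids, not NR_CPUS. */
    for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
        printf("cpu%u\n", cpu);
    return 0;
}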

/linux/arch/arm/mach-bcm/
bcm63xx_smp.c
     64  if (ncores > nr_cpu_ids) {           in scu_a9_enable()
     66  ncores, nr_cpu_ids);                 in scu_a9_enable()
     67  ncores = nr_cpu_ids;                 in scu_a9_enable()

/linux/arch/riscv/kernel/
smpboot.c
    153  if (cpuid > nr_cpu_ids)              in of_parse_and_init_cpus()
    155  cpuid, nr_cpu_ids);                  in of_parse_and_init_cpus()
    169  for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)   in setup_smp()

acpi_numa.c
     42  for (cpu = 0; cpu < nr_cpu_ids; cpu++)   in get_cpu_for_acpi_id()
    100  for (i = 0; i < nr_cpu_ids; i++)     in acpi_map_cpus_to_nodes()

/linux/kernel/irq/
ipi.c
     70  if (next < nr_cpu_ids)               in irq_reserve_ipi()
     72  if (next < nr_cpu_ids) {             in irq_reserve_ipi()
    168  if (!data || cpu >= nr_cpu_ids)      in ipi_get_hwirq()
    199  if (cpu >= nr_cpu_ids)               in ipi_send_verify()

/linux/scripts/gdb/linux/
timerlist.py
    152  nr_cpu_ids = 1
    154  nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
    158  num_bytes = (nr_cpu_ids + 7) / 8
    174  extra = nr_cpu_ids % 8

/linux/net/netfilter/
nf_flow_table_procfs.c
     14  for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {   in nf_flow_table_cpu_seq_start()
     29  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {   in nf_flow_table_cpu_seq_next()
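The flow-table procfs iterator walks CPUs with a seq_file position cursor: start
resumes at *pos - 1, next continues from *pos, and the walk ends once the index
reaches nr_cpu_ids, skipping ids that are not possible. A userspace model of that
cursor walk (seq_file machinery replaced by a plain loop; the possible-mask is toy
data):

#include <stdio.h>

#define NR_CPU_IDS 8U

/* Toy possible-set: CPUs 0, 1 and 3 exist (holes are allowed). */
static int cpu_possible(unsigned int cpu)
{
    return cpu == 0 || cpu == 1 || cpu == 3;
}

/* seq_file-style cursor: return the next possible CPU at or after
 * '*pos', advancing the cursor past it; -1 means iteration is done. */
static int next_possible(unsigned long long *pos)
{
    for (unsigned int cpu = (unsigned int)*pos; cpu < NR_CPU_IDS; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu + 1;
        return (int)cpu;
    }
    return -1;
}

int main(void)
{
    unsigned long long pos = 0;
    int cpu;

    while ((cpu = next_possible(&pos)) >= 0)   /* visits 0, 1, 3 */
        printf("stats for cpu%d\n", cpu);
    return 0;
}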

/linux/arch/x86/xen/
smp_pv.c
    156  for (i = 0; i < nr_cpu_ids; i++)     in xen_pv_smp_config()
    215  for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)   in xen_pv_smp_prepare_cpus()
    414  for (cpus = 0; cpus < nr_cpu_ids; cpus++) {   in xen_smp_count_cpus()
    420  if (cpus < nr_cpu_ids)               in xen_smp_count_cpus()

/linux/arch/arm/mach-omap2/
omap-smp.c
    278  if (ncores > nr_cpu_ids) {           in omap4_smp_init_cpus()
    280  ncores, nr_cpu_ids);                 in omap4_smp_init_cpus()
    281  ncores = nr_cpu_ids;                 in omap4_smp_init_cpus()

/linux/drivers/perf/
arm_pmu_platform.c
     87  cpu = nr_cpu_ids;                    in pmu_parse_irq_affinity()
    123  if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))   in pmu_parse_irqs()
    141  if (cpu >= nr_cpu_ids)               in pmu_parse_irqs()

/linux/arch/x86/kernel/apic/
probe_32.c
     98  if (nr_cpu_ids <= 8 || xen_pv_domain())   in x86_32_probe_bigsmp_early()
    121  if (nr_cpu_ids > 8 && !xen_pv_domain())   in x86_32_install_bigsmp()

/linux/kernel/sched/
isolation.c
     40  if (cpu < nr_cpu_ids)                in housekeeping_any_cpu()
     44  if (likely(cpu < nr_cpu_ids))        in housekeeping_any_cpu()
    137  if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) {   in housekeeping_setup()

cpupri.c
     99  if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)   in __cpupri_find()
    290  cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);   in cpupri_init()

/linux/drivers/md/
dm-ps-io-affinity.c
     80  if (cpu >= nr_cpu_ids) {             in ioa_add_path()
     82  cpu, nr_cpu_ids);                    in ioa_add_path()
    119  s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),   in ioa_create()

/linux/arch/x86/mm/
numa.c
     79  if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {   in numa_set_node()
    158  for (i = 0; i < nr_cpu_ids; i++) {   in numa_init_array()
    182  for (i = 0; i < nr_cpu_ids; i++) {   in numa_init()

/linux/arch/x86/kernel/
tsc_sync.c
    102  if (next_cpu >= nr_cpu_ids)          in tsc_sync_check_timer_fn()
    208  refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;   in tsc_store_and_check_tsc_adjust()
    210  if (refcpu >= nr_cpu_ids) {          in tsc_store_and_check_tsc_adjust()

/linux/drivers/nvdimm/
nd_perf.c
    155  if (target >= nr_cpu_ids) {          in nvdimm_pmu_cpu_offline()
    163  if (target >= 0 && target < nr_cpu_ids)   in nvdimm_pmu_cpu_offline()
    175  if (nd_pmu->cpu >= nr_cpu_ids)       in nvdimm_pmu_cpu_online()