| /linux/tools/sched_ext/ |
| scx_central.c |
|    61  skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();  in main()
|    64  assert(skel->rodata->nr_cpu_ids > 0);  in main()
|    65  assert(skel->rodata->nr_cpu_ids <= INT32_MAX);  in main()
|    74  if (central_cpu >= skel->rodata->nr_cpu_ids) {  in main()
|    75  fprintf(stderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids);  in main()
|    91  RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);  in main()
|    92  RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);  in main()
|   107  cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);  in main()
|   109  CPU_ZERO_S(CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids), cpuset);  in main()
|   113  skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);  in main()
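The pattern here (read the possible-CPU count once via libbpf, validate user input against it, then size per-CPU buffers to match) looks roughly like the following standalone sketch; names are hypothetical and error handling is abbreviated:

    #define _GNU_SOURCE
    #include <assert.h>
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/libbpf.h>

    int main(int argc, char **argv)
    {
            /* Possible-CPU count as the kernel sees it (includes offline CPUs). */
            int nr_cpu_ids = libbpf_num_possible_cpus();
            unsigned int central_cpu = argc > 1 ? (unsigned int)atoi(argv[1]) : 0;
            cpu_set_t *cpuset;

            assert(nr_cpu_ids > 0);

            /* Reject CPU ids the kernel can never bring up. */
            if (central_cpu >= (unsigned int)nr_cpu_ids) {
                    fprintf(stderr, "invalid CPU %u (%d max)\n",
                            central_cpu, nr_cpu_ids - 1);
                    return 1;
            }

            /* CPU_ALLOC sizes the set dynamically; a plain cpu_set_t caps out
             * at CPU_SETSIZE and would break on larger machines. */
            cpuset = CPU_ALLOC(nr_cpu_ids);
            assert(cpuset);
            CPU_ZERO_S(CPU_ALLOC_SIZE(nr_cpu_ids), cpuset);
            CPU_SET_S(central_cpu, CPU_ALLOC_SIZE(nr_cpu_ids), cpuset);
            CPU_FREE(cpuset);
            return 0;
    }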
|
| scx_central.bpf.c |
|    59  const volatile u32 nr_cpu_ids = 1;  /* !0 for veristat, set during init */  (variable)
|   188  bpf_for(cpu, 0, nr_cpu_ids) {  in BPF_STRUCT_OPS()
|   195  gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
|   226  gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
|   241  u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
|   249  u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);  in BPF_STRUCT_OPS()
|   267  bpf_for(i, 0, nr_cpu_ids) {  in central_timerfn()
|   268  s32 cpu = (nr_timers + i) % nr_cpu_ids;  in central_timerfn()
|   275  started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);  in central_timerfn()
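On the BPF side, nr_cpu_ids is a const volatile rodata variable that userspace fills in before load (hit 61 in the entry above); the nonzero initializer keeps veristat from seeing a zero loop bound. A minimal sketch of the bounds-checked element access that the ARRAY_ELEM_PTR() macro performs; MAX_CPUS and the array are hypothetical stand-ins, not the scheduler's actual definitions:

    #include <linux/types.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_CPUS 1024                   /* hypothetical compile-time cap */

    const volatile __u32 nr_cpu_ids = 1;    /* !0 for veristat; set via skel->rodata */
    __u64 cpu_started_at[MAX_CPUS];         /* hypothetical per-CPU slot array */

    char _license[] SEC("license") = "GPL";

    static __u64 *cpu_elem(__u32 cpu)
    {
            /* Two bounds matter: nr_cpu_ids is the runtime limit, MAX_CPUS is
             * the compile-time limit the verifier can actually prove against. */
            if (cpu >= nr_cpu_ids || cpu >= MAX_CPUS)
                    return NULL;
            return &cpu_started_at[cpu];
    }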
|
| /linux/arch/powerpc/kernel/ |
| paca.c |
|    62  size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);  in alloc_shared_lppaca()
|   245  paca_nr_cpu_ids = nr_cpu_ids;  in allocate_paca_ptrs()
|   247  paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;  in allocate_paca_ptrs()
|   291  new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;  in free_unused_pacas()
|   296  paca_nr_cpu_ids = nr_cpu_ids;  in free_unused_pacas()
|   309  paca_ptrs_size + paca_struct_size, nr_cpu_ids);  in free_unused_pacas()
|
| setup-common.c |
|   327  if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)  in show_cpuinfo()
|   339  if ((*pos) < nr_cpu_ids)  in c_start()
|   420  for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {  in assign_threads()
|   462  cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32),  in smp_setup_cpu_maps()
|   508  } else if (cpu >= nr_cpu_ids) {  in smp_setup_cpu_maps()
|   513  if (cpu < nr_cpu_ids)  in smp_setup_cpu_maps()
|   547  if (maxcpus > nr_cpu_ids) {  in smp_setup_cpu_maps()
|   551  maxcpus, nr_cpu_ids);  in smp_setup_cpu_maps()
|   552  maxcpus = nr_cpu_ids;  in smp_setup_cpu_maps()
|   899  memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));  in smp_setup_pacas()
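Hit 327 shows the pervasive sentinel convention: cpumask search helpers return a value greater than or equal to nr_cpu_ids when no further set bit exists, so "no next CPU" is a range comparison rather than a -1 style error code. A sketch; the function name is hypothetical:

    #include <linux/cpumask.h>

    /* True when @cpu is the highest-numbered CPU currently online: the
     * search for a next online CPU runs off the end of the mask. */
    static bool is_last_online_cpu(unsigned int cpu)
    {
            return cpumask_next(cpu, cpu_online_mask) >= nr_cpu_ids;
    }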
|
| /linux/arch/arm/mach-spear/ |
| platsmp.c |
|   102  if (ncores > nr_cpu_ids) {  in spear13xx_smp_init_cpus()
|   104  ncores, nr_cpu_ids);  in spear13xx_smp_init_cpus()
|   105  ncores = nr_cpu_ids;  in spear13xx_smp_init_cpus()
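This clamp recurs almost verbatim in the bcm63xx and omap entries below: the hardware (here the Cortex-A9 SCU) can report more cores than the kernel was built or booted to handle, so the count is capped at nr_cpu_ids with a warning. A sketch of the shared shape; the helper name is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    static unsigned int clamp_core_count(unsigned int ncores)
    {
            if (ncores > nr_cpu_ids) {
                    pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
                            ncores, nr_cpu_ids);
                    ncores = nr_cpu_ids;
            }
            return ncores;
    }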
|
| /linux/arch/arm/mach-bcm/ |
| bcm63xx_smp.c |
|    64  if (ncores > nr_cpu_ids) {  in scu_a9_enable()
|    66  ncores, nr_cpu_ids);  in scu_a9_enable()
|    67  ncores = nr_cpu_ids;  in scu_a9_enable()
|
| /linux/include/linux/ |
| cpumask.h |
|    27  #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
|    30  #define nr_cpu_ids ((unsigned int)NR_CPUS)  (macro)
|    32  extern unsigned int nr_cpu_ids;
|    38  WARN_ON(nr != nr_cpu_ids);  in set_nr_cpu_ids()
|    40  nr_cpu_ids = nr;  in set_nr_cpu_ids()
|    72  #define small_cpumask_bits nr_cpu_ids
|    75  #define small_cpumask_bits nr_cpu_ids
|    76  #define large_cpumask_bits nr_cpu_ids
|    78  #define nr_cpumask_bits nr_cpu_ids
|   369  return find_random_bit(cpumask_bits(src), nr_cpu_ids);  in cpumask_random()
| [all …]
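Hits 27 through 40 are the heart of the interface: on UP builds nr_cpu_ids collapses to the compile-time NR_CPUS constant, while on SMP it is a runtime variable that set_nr_cpu_ids() stores into. The helper at hit 27 pairs the bit count with the bit array for printk's %*pb/%*pbl bitmap format extensions, for example:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    static void show_online_cpus(void)
    {
            /* "%*pbl" prints a bitmap as a range list such as "0-3,8"; the
             * field width consumed by '*' is the nr_cpu_ids that
             * cpumask_pr_args() expands to, so bits past the possible range
             * are never printed. */
            pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
    }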
|
| /linux/arch/riscv/kernel/ |
| smpboot.c |
|   155  if (cpuid > nr_cpu_ids)  in of_parse_and_init_cpus()
|   157  cpuid, nr_cpu_ids);  in of_parse_and_init_cpus()
|   171  for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)  in setup_smp()
|
| acpi_numa.c |
|    42  for (cpu = 0; cpu < nr_cpu_ids; cpu++)  in get_cpu_for_acpi_id()
|   100  for (i = 0; i < nr_cpu_ids; i++)  in acpi_map_cpus_to_nodes()
|
| /linux/kernel/irq/ |
| ipi.c |
|    70  if (next < nr_cpu_ids)  in irq_reserve_ipi()
|    72  if (next < nr_cpu_ids) {  in irq_reserve_ipi()
|   168  if (!data || cpu >= nr_cpu_ids)  in ipi_get_hwirq()
|   199  if (cpu >= nr_cpu_ids)  in ipi_send_verify()
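Hits 70 and 72 implement a hole check: irq_reserve_ipi() only accepts a consecutive destination range, which a double cpumask search expresses compactly. Roughly as follows; the helper name is hypothetical, and the empty-mask case is handled by the caller in the real code:

    #include <linux/cpumask.h>

    static bool cpumask_is_consecutive(const struct cpumask *dest)
    {
            unsigned int next, first = cpumask_first(dest);

            /* The first clear bit after the start of the range... */
            next = cpumask_next_zero(first, dest);
            /* ...must not be followed by another set bit. */
            if (next < nr_cpu_ids)
                    next = cpumask_next(next, dest);
            return next >= nr_cpu_ids;
    }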
|
| /linux/scripts/gdb/linux/ |
| timerlist.py |
|   150  nr_cpu_ids = 1
|   152  nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
|   156  num_bytes = (nr_cpu_ids + 7) / 8
|   172  extra = nr_cpu_ids % 8
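The gdb helper reads the kernel variable at debug time and then does plain bit-packing arithmetic: a ceiling division to get the bytes backing one bit per CPU, and a modulo for the stragglers in the last byte. The same arithmetic as a standalone C sketch, not kernel code:

    #include <stddef.h>

    /* Bytes needed for a bitmap holding one bit per possible CPU. */
    static size_t cpu_bitmap_bytes(unsigned int nr_cpu_ids)
    {
            return (nr_cpu_ids + 7) / 8;    /* ceiling division by 8 */
    }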
|
| /linux/arch/arm64/kernel/ |
| topology.c |
|    67  if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))  in freq_counters_valid()
|   224  ref_cpu = nr_cpu_ids;  in arch_freq_get_on_cpu()
|   233  if (ref_cpu >= nr_cpu_ids)  in arch_freq_get_on_cpu()
|   375  if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))  in cpc_ffh_supported()
|
| /linux/net/netfilter/ |
| nf_flow_table_procfs.c |
|    14  for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {  in nf_flow_table_cpu_seq_start()
|    29  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in nf_flow_table_cpu_seq_next()
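This is the stock seq_file walk over per-CPU statistics: *pos is biased by one so that position 0 can return SEQ_START_TOKEN for a header row, and the scan skips holes in the possible map. A sketch with a hypothetical per-CPU counter standing in for the flow-table stats:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/seq_file.h>

    static DEFINE_PER_CPU(unsigned long, demo_stat);    /* hypothetical */

    static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
    {
            int cpu;

            if (*pos == 0)
                    return SEQ_START_TOKEN;    /* header line */

            for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
                    if (!cpu_possible(cpu))
                            continue;          /* hole in the possible map */
                    *pos = cpu + 1;
                    return per_cpu_ptr(&demo_stat, cpu);
            }
            return NULL;
    }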
|
| /linux/lib/ |
| objpool.c |
|    53  for (i = 0; i < nr_cpu_ids; i++) {  in objpool_init_percpu_slots()
|   112  for (i = 0; i < nr_cpu_ids; i++)  in objpool_fini_percpu_slots()
|   145  slot_size = nr_cpu_ids * sizeof(struct objpool_slot);  in objpool_init()
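objpool sizes a flat slot table by nr_cpu_ids because the table is indexed by raw CPU id; since the possible map may be sparse, init and teardown walk all ids but only touch possible CPUs. A rough sketch of that shape; the struct and names are hypothetical:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct demo_slot { unsigned long head, tail; };    /* hypothetical */

    static struct demo_slot **demo_alloc_slots(void)
    {
            struct demo_slot **slots;
            unsigned int i;

            /* Indexed by raw CPU id, so it must span all nr_cpu_ids entries
             * even when only a few of them are possible. */
            slots = kcalloc(nr_cpu_ids, sizeof(*slots), GFP_KERNEL);
            if (!slots)
                    return NULL;

            for (i = 0; i < nr_cpu_ids; i++) {
                    if (!cpu_possible(i))
                            continue;
                    slots[i] = kzalloc(sizeof(*slots[i]), GFP_KERNEL);
                    if (!slots[i])
                            goto fail;
            }
            return slots;
    fail:
            while (i--)
                    kfree(slots[i]);    /* kfree(NULL) is a no-op */
            kfree(slots);
            return NULL;
    }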
|
| flex_proportions.c |
|    89  #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
|   127  if (val < (nr_cpu_ids * PROP_BATCH))  in fprop_reflect_period_percpu()
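The percpu batch grows with the logarithm of the CPU count, so the error bound checked at hit 127 (nr_cpu_ids * PROP_BATCH) stays manageable on large machines. Worked values for a few assumed CPU counts:

    #include <linux/log2.h>

    /*
     * PROP_BATCH = 8 * (1 + ilog2(nr_cpu_ids)); ilog2() rounds down:
     *   nr_cpu_ids =   1  ->  8 * (1 + 0) =  8
     *   nr_cpu_ids =  64  ->  8 * (1 + 6) = 56
     *   nr_cpu_ids = 100  ->  8 * (1 + 6) = 56    (ilog2(100) == 6)
     *   nr_cpu_ids = 256  ->  8 * (1 + 8) = 72
     */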
|
| /linux/arch/arm/mach-omap2/ |
| omap-smp.c |
|   278  if (ncores > nr_cpu_ids) {  in omap4_smp_init_cpus()
|   280  ncores, nr_cpu_ids);  in omap4_smp_init_cpus()
|   281  ncores = nr_cpu_ids;  in omap4_smp_init_cpus()
|
| /linux/arch/x86/xen/ |
| smp_pv.c |
|   156  for (i = 0; i < nr_cpu_ids; i++)  in xen_pv_smp_config()
|   215  for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)  in xen_pv_smp_prepare_cpus()
|   413  for (cpus = 0; cpus < nr_cpu_ids; cpus++) {  in xen_smp_count_cpus()
|   419  if (cpus < nr_cpu_ids)  in xen_smp_count_cpus()
|
| /linux/drivers/md/ |
| dm-ps-io-affinity.c |
|    80  if (cpu >= nr_cpu_ids) {  in ioa_add_path()
|    82  cpu, nr_cpu_ids);  in ioa_add_path()
|   119  s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),  in ioa_create()
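Here nr_cpu_ids is both the allocation width of path_map and the validation bound for CPU numbers parsed from untrusted table arguments; any id at or above it would index out of bounds. A sketch; function and parameter names are hypothetical:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    /* path_map is assumed kcalloc'd with nr_cpu_ids entries, as at hit 119. */
    static int demo_map_path(void **path_map, unsigned int cpu, void *path)
    {
            if (cpu >= nr_cpu_ids) {
                    pr_err("Config error: CPU %u not in range 0..%u\n",
                           cpu, nr_cpu_ids - 1);
                    return -EINVAL;
            }
            path_map[cpu] = path;
            return 0;
    }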
|
| /linux/arch/x86/mm/ |
| numa.c |
|    80  if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {  in numa_set_node()
|   159  for (i = 0; i < nr_cpu_ids; i++) {  in numa_init_array()
|   183  for (i = 0; i < nr_cpu_ids; i++) {  in numa_init()
|
| /linux/kernel/ |
| smp.c |
|   271  if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))  in csd_lock_wait_toolong()
|   445  if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {  in generic_exec_single()
|   812  if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {  in smp_call_function_many_cond()
|   960  if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)  in nrcpus()
|   981  unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
|   982  EXPORT_SYMBOL(nr_cpu_ids);
|   985  /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
|  1159  if (cpu >= nr_cpu_ids || !cpu_online(cpu))
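Hits 960 through 985 show the variable's lifecycle: it starts at the compile-time NR_CPUS ceiling and can only shrink, either by an arch during early setup or via the nr_cpus= command line. A reconstruction of the early-param handler around hit 960; the early_param wiring is assumed from memory of the surrounding source, not shown in the hits above:

    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    static int __init nrcpus(char *str)
    {
            int nr_cpus;

            /* Only ever lower the limit; raising it past NR_CPUS is
             * impossible because the static masks were sized at compile
             * time. */
            if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                    set_nr_cpu_ids(nr_cpus);

            return 0;
    }
    early_param("nr_cpus", nrcpus);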
|
| /linux/drivers/net/ethernet/intel/libeth/ |
| xsk.c |
|   230  if (unlikely(qid >= nr_cpu_ids))  in libeth_xsk_wakeup()
|   231  qid %= nr_cpu_ids;  in libeth_xsk_wakeup()
|
| /linux/kernel/sched/ |
| cpupri.c |
|   100  if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)  in __cpupri_find()
|   291  cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);  in cpupri_init()
|
| /linux/drivers/nvdimm/ |
| nd_perf.c |
|   155  if (target >= nr_cpu_ids) {  in nvdimm_pmu_cpu_offline()
|   163  if (target >= 0 && target < nr_cpu_ids)  in nvdimm_pmu_cpu_offline()
|   175  if (nd_pmu->cpu >= nr_cpu_ids)  in nvdimm_pmu_cpu_online()
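A typical PMU hotplug callback: when the CPU hosting the event context goes offline, pick any other online CPU as the new home, treating a result at or above nr_cpu_ids as "none left". A sketch; names are hypothetical:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int demo_pmu_cpu_offline(unsigned int dying, unsigned int *home_cpu)
    {
            unsigned int target;

            if (*home_cpu != dying)
                    return 0;    /* context lives elsewhere; nothing to move */

            target = cpumask_any_but(cpu_online_mask, dying);
            if (target >= nr_cpu_ids)
                    return -ENODEV;    /* no other online CPU to migrate to */

            *home_cpu = target;
            return 0;
    }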
|
| /linux/drivers/platform/x86/intel/speed_select_if/ |
| isst_if_common.c |
|   215  if (cmd->logical_cpu >= nr_cpu_ids)  in isst_if_mbox_cmd_invalid()
|   298  cpu >= nr_cpu_ids || cpu >= num_possible_cpus())  in _isst_if_get_pci_dev()
|   372  cpu >= nr_cpu_ids || cpu >= num_possible_cpus())  in isst_if_get_pci_dev()
|   468  if (cpu_map->logical_cpu >= nr_cpu_ids ||  in isst_if_proc_phyid_req()
|   500  if (msr_cmd->logical_cpu >= nr_cpu_ids)  in isst_if_msr_cmd_req()
|
| /linux/drivers/base/ |
| cpu.c |
|   261  if (total_cpus && nr_cpu_ids < total_cpus) {  in print_cpus_offline()
|   264  if (nr_cpu_ids == total_cpus-1)  in print_cpus_offline()
|   265  len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);  in print_cpus_offline()
|   268  nr_cpu_ids, total_cpus - 1);  in print_cpus_offline()
|   448  if (cpu < nr_cpu_ids && cpu_possible(cpu))  in get_cpu_device()
|