Home
last modified time | relevance | path

Searched refs: nr_cpu_ids (Results 1 – 25 of 252) sorted by relevance

1 2 3 4 5 6 7 8 9 10 11

/linux/tools/sched_ext/
H A Dscx_central.c63 skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); in main()
66 assert(skel->rodata->nr_cpu_ids > 0); in main()
67 assert(skel->rodata->nr_cpu_ids <= INT32_MAX); in main()
76 if (central_cpu >= skel->rodata->nr_cpu_ids) { in main()
77 …tderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids); in main()
94 RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids); in main()
95 RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids); in main()
110 cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids); in main()
112 cpuset_size = CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids); in main()
117 skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1); in main()
H A Dscx_pair.c59 skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); in main()
63 stride = skel->rodata->nr_cpu_ids / 2; in main()
85 bpf_map__set_max_entries(skel->maps.pair_ctx, skel->rodata->nr_cpu_ids / 2); in main()
88 RESIZE_ARRAY(skel, rodata, pair_cpu, skel->rodata->nr_cpu_ids); in main()
89 RESIZE_ARRAY(skel, rodata, pair_id, skel->rodata->nr_cpu_ids); in main()
90 RESIZE_ARRAY(skel, rodata, in_pair_idx, skel->rodata->nr_cpu_ids); in main()
92 for (i = 0; i < skel->rodata->nr_cpu_ids; i++) in main()
96 for (i = 0; i < skel->rodata->nr_cpu_ids; i++) { in main()
97 int j = (i + stride) % skel->rodata->nr_cpu_ids; in main()
H A Dscx_central.bpf.c59 const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */ variable
188 bpf_for(cpu, 0, nr_cpu_ids) { in BPF_STRUCT_OPS()
195 gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
226 gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
241 u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
249 u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); in BPF_STRUCT_OPS()
267 bpf_for(i, 0, nr_cpu_ids) { in central_timerfn()
268 s32 cpu = (nr_timers + i) % nr_cpu_ids; in central_timerfn()
275 started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); in central_timerfn()
/linux/arch/powerpc/kernel/
H A Dpaca.c62 size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE); in alloc_shared_lppaca()
245 paca_nr_cpu_ids = nr_cpu_ids; in allocate_paca_ptrs()
247 paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; in allocate_paca_ptrs()
291 new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; in free_unused_pacas()
296 paca_nr_cpu_ids = nr_cpu_ids; in free_unused_pacas()
309 paca_ptrs_size + paca_struct_size, nr_cpu_ids); in free_unused_pacas()
H A Dsetup-common.c326 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) in show_cpuinfo()
338 if ((*pos) < nr_cpu_ids) in c_start()
419 for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) { in assign_threads()
461 cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32), in smp_setup_cpu_maps()
507 } else if (cpu >= nr_cpu_ids) { in smp_setup_cpu_maps()
512 if (cpu < nr_cpu_ids) in smp_setup_cpu_maps()
546 if (maxcpus > nr_cpu_ids) { in smp_setup_cpu_maps()
550 maxcpus, nr_cpu_ids); in smp_setup_cpu_maps()
551 maxcpus = nr_cpu_ids; in smp_setup_cpu_maps()
898 memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32)); in smp_setup_pacas()
/linux/arch/x86/kernel/cpu/
H A Dtopology.c271 if (apic_id != topo_info.boot_cpu_apic_id && topo_info.nr_assigned_cpus >= nr_cpu_ids) { in topology_register_apic()
272 pr_warn_once("CPU limit of %d reached. Ignoring further CPUs\n", nr_cpu_ids); in topology_register_apic()
425 unsigned int possible = nr_cpu_ids; in topology_apply_cmdline_limits_early()
434 if (possible < nr_cpu_ids) { in topology_apply_cmdline_limits_early()
476 if (WARN_ON_ONCE(assigned > nr_cpu_ids)) { in topology_init_possible_cpus()
477 disabled += assigned - nr_cpu_ids; in topology_init_possible_cpus()
478 assigned = nr_cpu_ids; in topology_init_possible_cpus()
480 allowed = min_t(unsigned int, total, nr_cpu_ids); in topology_init_possible_cpus()
/linux/arch/arm/mach-spear/
H A Dplatsmp.c102 if (ncores > nr_cpu_ids) { in spear13xx_smp_init_cpus()
104 ncores, nr_cpu_ids); in spear13xx_smp_init_cpus()
105 ncores = nr_cpu_ids; in spear13xx_smp_init_cpus()
/linux/include/linux/
H A Dcpumask.h27 #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
30 #define nr_cpu_ids ((unsigned int)NR_CPUS) macro
32 extern unsigned int nr_cpu_ids;
38 WARN_ON(nr != nr_cpu_ids); in set_nr_cpu_ids()
40 nr_cpu_ids = nr; in set_nr_cpu_ids()
72 #define small_cpumask_bits nr_cpu_ids
75 #define small_cpumask_bits nr_cpu_ids
76 #define large_cpumask_bits nr_cpu_ids
78 #define nr_cpumask_bits nr_cpu_ids
369 return find_random_bit(cpumask_bits(src), nr_cpu_ids); in cpumask_random()
[all …]
/linux/kernel/
H A Dscftorture.c372 cpu = torture_random(trsp) % nr_cpu_ids; in scftorture_invoke_one()
379 cpu = torture_random(trsp) % nr_cpu_ids; in scftorture_invoke_one()
402 cpu = torture_random(trsp) % nr_cpu_ids; in scftorture_invoke_one()
485 cpu = scfp->cpu % nr_cpu_ids; in scftorture_invoker()
497 __func__, scfp->cpu, curcpu, nr_cpu_ids); in scftorture_invoker()
563 for (i = 0; i < nr_cpu_ids; i++) in scf_torture_cleanup()
599 weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
600 weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
601 weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
602 weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids; in scf_torture_init()
H A Dsmp.c271 if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu)) in csd_lock_wait_toolong()
445 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { in generic_exec_single()
812 if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) { in smp_call_function_many_cond()
960 if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids) in nrcpus()
981 unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
982 EXPORT_SYMBOL(nr_cpu_ids);
985 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
1159 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
/linux/arch/arm/mach-bcm/
H A Dbcm63xx_smp.c64 if (ncores > nr_cpu_ids) { in scu_a9_enable()
66 ncores, nr_cpu_ids); in scu_a9_enable()
67 ncores = nr_cpu_ids; in scu_a9_enable()
/linux/kernel/irq/
H A Dipi.c70 if (next < nr_cpu_ids) in irq_reserve_ipi()
72 if (next < nr_cpu_ids) { in irq_reserve_ipi()
168 if (!data || cpu >= nr_cpu_ids) in ipi_get_hwirq()
199 if (cpu >= nr_cpu_ids) in ipi_send_verify()
/linux/scripts/gdb/linux/
H A Dtimerlist.py150 nr_cpu_ids = 1
152 nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
156 num_bytes = (nr_cpu_ids + 7) / 8
172 extra = nr_cpu_ids % 8
/linux/arch/riscv/kernel/
H A Dsmpboot.c155 if (cpuid > nr_cpu_ids) in of_parse_and_init_cpus()
157 cpuid, nr_cpu_ids); in of_parse_and_init_cpus()
171 for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) in setup_smp()
H A Dacpi_numa.c42 for (cpu = 0; cpu < nr_cpu_ids; cpu++) in get_cpu_for_acpi_id()
100 for (i = 0; i < nr_cpu_ids; i++) in acpi_map_cpus_to_nodes()
/linux/arch/arm64/kernel/
H A Dtopology.c67 if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) in freq_counters_valid()
224 ref_cpu = nr_cpu_ids; in arch_freq_get_on_cpu()
233 if (ref_cpu >= nr_cpu_ids) in arch_freq_get_on_cpu()
444 if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) in cpc_ffh_supported()
/linux/lib/
H A Dobjpool.c53 for (i = 0; i < nr_cpu_ids; i++) { in objpool_init_percpu_slots()
112 for (i = 0; i < nr_cpu_ids; i++) in objpool_fini_percpu_slots()
145 slot_size = nr_cpu_ids * sizeof(struct objpool_slot *); in objpool_init()
/linux/drivers/infiniband/hw/hfi1/
H A Daffinity.c75 if (cpu >= nr_cpu_ids) /* empty */ in cpu_mask_set_get_first()
114 cpumask_clear_cpus(&node_affinity.real_cpu_mask, curr_cpu, nr_cpu_ids - curr_cpu); in init_real_cpu_mask()
253 if (ret_cpu >= nr_cpu_ids) { in per_cpu_affinity_get()
289 if (max_cpu >= nr_cpu_ids) in per_cpu_affinity_put_max()
343 if (cpu >= nr_cpu_ids) in _dev_comp_vect_cpu_get()
346 if (cpu >= nr_cpu_ids) { /* empty */ in _dev_comp_vect_cpu_get()
590 if (cpumask_first(local_mask) >= nr_cpu_ids) in hfi1_dev_affinity_init()
651 if (curr_cpu >= nr_cpu_ids) in hfi1_dev_affinity_init()
970 cpumask_clear_cpus(hw_thread_mask, curr_cpu, nr_cpu_ids - curr_cpu); in find_hw_thread_mask()
1138 if (cpu >= nr_cpu_ids) /* empty */ in hfi1_get_proc_affinity()
/linux/kernel/sched/
H A Disolation.c85 if (cpu < nr_cpu_ids) in housekeeping_any_cpu()
89 if (likely(cpu < nr_cpu_ids)) in housekeeping_any_cpu()
230 if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) { in housekeeping_setup()
277 if (first_cpu >= min(nr_cpu_ids, setup_max_cpus)) { in housekeeping_setup()
/linux/net/netfilter/
H A Dnf_flow_table_procfs.c14 for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) { in nf_flow_table_cpu_seq_start()
29 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in nf_flow_table_cpu_seq_next()
/linux/arch/x86/xen/
H A Dsmp_pv.c155 for (i = 0; i < nr_cpu_ids; i++) in xen_pv_smp_config()
214 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) in xen_pv_smp_prepare_cpus()
412 for (cpus = 0; cpus < nr_cpu_ids; cpus++) { in xen_smp_count_cpus()
418 if (cpus < nr_cpu_ids) in xen_smp_count_cpus()
/linux/arch/arm/mach-omap2/
H A Domap-smp.c278 if (ncores > nr_cpu_ids) { in omap4_smp_init_cpus()
280 ncores, nr_cpu_ids); in omap4_smp_init_cpus()
281 ncores = nr_cpu_ids; in omap4_smp_init_cpus()
/linux/drivers/md/
H A Ddm-ps-io-affinity.c80 if (cpu >= nr_cpu_ids) { in ioa_add_path()
82 cpu, nr_cpu_ids); in ioa_add_path()
119 s->path_map = kzalloc_objs(struct path_info *, nr_cpu_ids); in ioa_create()
/linux/arch/x86/mm/
H A Dnuma.c87 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { in numa_set_node()
166 for (i = 0; i < nr_cpu_ids; i++) { in numa_init_array()
190 for (i = 0; i < nr_cpu_ids; i++) { in numa_init()
/linux/drivers/nvdimm/
H A Dnd_perf.c155 if (target >= nr_cpu_ids) { in nvdimm_pmu_cpu_offline()
163 if (target >= 0 && target < nr_cpu_ids) in nvdimm_pmu_cpu_offline()
175 if (nd_pmu->cpu >= nr_cpu_ids) in nvdimm_pmu_cpu_online()

1 2 3 4 5 6 7 8 9 10 11