
Searched full:cpus (Results 1 – 25 of 2246) sorted by relevance


/linux/tools/lib/perf/
cpumap.c
25 RC_STRUCT(perf_cpu_map) *cpus; in perf_cpu_map__alloc()
31 cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus); in perf_cpu_map__new_any_cpu()
32 if (ADD_RC_CHK(result, cpus)) { in perf_cpu_map__new_any_cpu()
33 cpus->nr = nr_cpus; in perf_cpu_map__new_any_cpu() local
34 refcount_set(&cpus->refcnt, 1); in perf_cpu_map__new_any_cpu()
41 struct perf_cpu_map *cpus = perf_cpu_map__alloc(1); in cpu_map__delete()
43 if (cpus) in cpu_map__delete()
44 RC_CHK_ACCESS(cpus)->map[0].cpu = -1; in cpu_map__delete()
46 return cpus; in cpu_map__delete()
72 struct perf_cpu_map *cpus; cpu_map__new_sysconf() local
98 struct perf_cpu_map *cpus = NULL; cpu_map__new_sysfs_online() local
111 struct perf_cpu_map *cpus = cpu_map__new_sysfs_online(); perf_cpu_map__new_online_cpus() local
127 __perf_cpu_map__cpu(const struct perf_cpu_map * cpus,int idx) __perf_cpu_map__cpu() argument
135 struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus); cpu_map__trim_new() local
159 struct perf_cpu_map *cpus = NULL; perf_cpu_map__read() local
215 struct perf_cpu_map *cpus = NULL; perf_cpu_map__new() local
292 __perf_cpu_map__nr(const struct perf_cpu_map * cpus) __perf_cpu_map__nr() argument
297 perf_cpu_map__cpu(const struct perf_cpu_map * cpus,int idx) perf_cpu_map__cpu() argument
309 perf_cpu_map__nr(const struct perf_cpu_map * cpus) perf_cpu_map__nr() argument
319 perf_cpu_map__idx(const struct perf_cpu_map * cpus,struct perf_cpu cpu) perf_cpu_map__idx() argument
344 perf_cpu_map__has(const struct perf_cpu_map * cpus,struct perf_cpu cpu) perf_cpu_map__has() argument
[all...]
/linux/tools/testing/selftests/riscv/hwprobe/
which-cpus.c
22 "which-cpus: [-h] [<key=value> [<key=value> ...]]\n\n" in help()
25 " <key=value>, outputs the cpulist for cpus which all match the given set\n" in help()
29 static void print_cpulist(cpu_set_t *cpus) in print_cpulist() argument
33 if (!CPU_COUNT(cpus)) { in print_cpulist()
34 printf("cpus: None\n"); in print_cpulist()
38 printf("cpus:"); in print_cpulist()
39 for (int i = 0, c = 0; i < CPU_COUNT(cpus); i++, c++) { in print_cpulist()
40 if (start != end && !CPU_ISSET(c, cpus)) in print_cpulist()
43 while (!CPU_ISSET(c, cpus)) in print_cpulist()
59 static void do_which_cpus(int argc, char **argv, cpu_set_t *cpus) in do_which_cpus() argument
[all …]
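
The which-cpus selftest above walks a cpu_set_t with the glibc CPU_COUNT() and CPU_ISSET() macros to build a printable cpulist. As a rough, self-contained sketch of those same macros (illustrative only, not taken from the selftest), the program below prints every CPU in the calling process's affinity mask:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t cpus;

        /* Fill the set from the caller's current affinity mask. */
        CPU_ZERO(&cpus);
        if (sched_getaffinity(0, sizeof(cpus), &cpus) != 0) {
            perror("sched_getaffinity");
            return 1;
        }

        printf("cpus (%d set):", CPU_COUNT(&cpus));
        for (int c = 0; c < CPU_SETSIZE; c++) {
            if (CPU_ISSET(c, &cpus))
                printf(" %d", c);
        }
        printf("\n");
        return 0;
    }
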
/linux/drivers/cpuidle/
coupled.c
3 * coupled.c - helper functions to enter the same idle state on multiple cpus
24 * cpus cannot be independently powered down, either due to
31 * shared between the cpus (L2 cache, interrupt controller, and
33 * be tightly controlled on both cpus.
36 * WFI state until all cpus are ready to enter a coupled state, at
38 * cpus at approximately the same time.
40 * Once all cpus are ready to enter idle, they are woken by an smp
42 * cpus will find work to do, and choose not to enter idle. A
43 * final pass is needed to guarantee that all cpus will call the
46 * ready counter matches the number of online coupled cpus. If any
[all …]
/linux/tools/testing/selftests/cgroup/
test_cpuset_prs.sh
25 SUBPARTS_CPUS=$CGROUP2/.__DEBUG__.cpuset.cpus.subpartitions
26 CPULIST=$(cat $CGROUP2/cpuset.cpus.effective)
29 [[ $NR_CPUS -lt 8 ]] && skip_test "Test needs at least 8 cpus available!"
79 echo 0-6 > test/cpuset.cpus
80 echo root > test/cpuset.cpus.partition
81 cat test/cpuset.cpus.partition | grep -q invalid
83 echo member > test/cpuset.cpus.partition
84 echo "" > test/cpuset.cpus
88 # If isolated CPUs have been reserved at boot time (as shown in
89 # cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-8
[all …]
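
The test_cpuset_prs.sh fragment above drives the cpuset v2 partition interface by writing cpulists and partition modes into cgroup control files. A rough C equivalent of its first two steps is sketched below; it assumes cgroup2 is mounted at /sys/fs/cgroup and that a child cgroup named "test" with the cpuset controller enabled already exists (both are assumptions for illustration, not part of the selftest):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short string to a cgroup control file, mimicking `echo val > path`. */
    static int write_str(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
            perror(path);
            if (fd >= 0)
                close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }

    int main(void)
    {
        /* Equivalent of: echo 0-6 > test/cpuset.cpus */
        if (write_str("/sys/fs/cgroup/test/cpuset.cpus", "0-6"))
            return 1;
        /* Equivalent of: echo root > test/cpuset.cpus.partition */
        if (write_str("/sys/fs/cgroup/test/cpuset.cpus.partition", "root"))
            return 1;
        puts("test is now a root partition over CPUs 0-6");
        return 0;
    }
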
/linux/Documentation/timers/
no_hz.rst
19 2. Omit scheduling-clock ticks on idle CPUs (CONFIG_NO_HZ_IDLE=y or
23 3. Omit scheduling-clock ticks on CPUs that are either idle or that
65 Omit Scheduling-Clock Ticks For Idle CPUs
78 scheduling-clock interrupts to idle CPUs, which is critically important
86 idle CPUs. That said, dyntick-idle mode is not free:
104 Omit Scheduling-Clock Ticks For CPUs With Only One Runnable Task
109 Note that omitting scheduling-clock ticks for CPUs with only one runnable
110 task implies also omitting them for idle CPUs.
113 sending scheduling-clock interrupts to CPUs with a single runnable task,
114 and such CPUs are said to be "adaptive-ticks CPUs". This is important
[all …]
/linux/sound/soc/intel/boards/
sof_board_helpers.c
182 struct snd_soc_dai_link_component *cpus; in set_ssp_codec_link() local
192 /* cpus */ in set_ssp_codec_link()
193 cpus = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link_component), in set_ssp_codec_link()
195 if (!cpus) in set_ssp_codec_link()
200 cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssp%d-port", in set_ssp_codec_link()
203 cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", in set_ssp_codec_link()
206 if (!cpus->dai_name) in set_ssp_codec_link()
209 link->cpus = cpus; in set_ssp_codec_link()
227 struct snd_soc_dai_link_component *cpus; in set_dmic_link() local
229 /* cpus */ in set_dmic_link()
[all …]
sof_pcm512x.c
225 struct snd_soc_dai_link_component *cpus; in sof_card_dai_links_create() local
231 cpus = devm_kcalloc(dev, sof_audio_card_pcm512x.num_links, in sof_card_dai_links_create()
233 if (!links || !cpus) in sof_card_dai_links_create()
255 links[id].cpus = &cpus[id]; in sof_card_dai_links_create()
258 links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, in sof_card_dai_links_create()
261 if (!links[id].cpus->dai_name) in sof_card_dai_links_create()
264 links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, in sof_card_dai_links_create()
267 if (!links[id].cpus->dai_name) in sof_card_dai_links_create()
276 links[id].cpus = &cpus[id]; in sof_card_dai_links_create()
277 links[id].cpus->dai_name = "DMIC01 Pin"; in sof_card_dai_links_create()
[all …]
/linux/include/linux/
stop_machine.h
13 * function to be executed on a single or multiple cpus preempting all
14 * other processes and monopolizing those cpus until it finishes.
18 * cpus are online.
99 * stop_machine: freeze the machine on all CPUs and run this function
102 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
114 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
117 * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
120 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
125 int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
133 * Same as above, but instead of every CPU, only the logical CPUs of a
[all …]
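
The prototypes excerpted above show the shape of the API: stop_machine() takes a cpu_stop_fn_t callback, an opaque data pointer, and an optional cpumask of CPUs to run on (NULL meaning any online CPU). A minimal, hypothetical kernel-side caller might look like the sketch below; the callback, the global it updates, and the wrapper function are all made up for illustration:

    #include <linux/stop_machine.h>

    static int example_value;    /* illustrative global updated under stop_machine() */

    static int update_everything(void *data)
    {
        /* Runs on one CPU while every other online CPU spins in the stopper. */
        example_value = *(int *)data;
        return 0;
    }

    static int apply_update(int new_value)
    {
        /* NULL cpumask: let the stopper pick any online CPU for the callback. */
        return stop_machine(update_everything, &new_value, NULL);
    }
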
/linux/tools/lib/perf/tests/
test-cpumap.c
16 struct perf_cpu_map *cpus; in test_cpumap() local
24 cpus = perf_cpu_map__new_any_cpu(); in test_cpumap()
25 if (!cpus) in test_cpumap()
28 perf_cpu_map__get(cpus); in test_cpumap()
29 perf_cpu_map__put(cpus); in test_cpumap()
30 perf_cpu_map__put(cpus); in test_cpumap()
32 cpus = perf_cpu_map__new_online_cpus(); in test_cpumap()
33 if (!cpus) in test_cpumap()
36 perf_cpu_map__for_each_cpu(cpu, idx, cpus) in test_cpumap()
39 perf_cpu_map__put(cpus); in test_cpumap()
test-evlist.c
36 struct perf_cpu_map *cpus; in test_stat_cpu() local
49 cpus = perf_cpu_map__new_online_cpus(); in test_stat_cpu()
50 __T("failed to create cpus", cpus); in test_stat_cpu()
69 perf_evlist__set_maps(evlist, cpus, NULL); in test_stat_cpu()
75 cpus = perf_evsel__cpus(evsel); in test_stat_cpu()
77 for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) { in test_stat_cpu()
88 perf_cpu_map__put(cpus); in test_stat_cpu()
218 struct perf_cpu_map *cpus; in test_mmap_thread() local
264 cpus = perf_cpu_map__new_any_cpu(); in test_mmap_thread()
265 __T("failed to create cpus", cpus); in test_mmap_thread()
[all …]
/linux/drivers/clk/sunxi/
clk-sun9i-cpus.c
7 * Allwinner A80 CPUS clock driver
52 struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw); in sun9i_a80_cpus_clk_recalc_rate() local
57 reg = readl(cpus->reg); in sun9i_a80_cpus_clk_recalc_rate()
152 struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw); in sun9i_a80_cpus_clk_set_rate() local
159 reg = readl(cpus->reg); in sun9i_a80_cpus_clk_set_rate()
167 writel(reg, cpus->reg); in sun9i_a80_cpus_clk_set_rate()
181 * sun9i_a80_cpus_setup() - Setup function for a80 cpus composite clk
189 struct sun9i_a80_cpus_clk *cpus; in sun9i_a80_cpus_setup() local
194 cpus = kzalloc(sizeof(*cpus), GFP_KERNEL); in sun9i_a80_cpus_setup()
195 if (!cpus) in sun9i_a80_cpus_setup()
[all …]
/linux/Documentation/arch/arm64/
cpu-hotplug.rst
9 CPUs online/offline using PSCI. This document is about ACPI firmware allowing
10 CPUs that were not available during boot to be added to the system later.
15 CPU Hotplug on physical systems - CPUs not present at boot
24 In the arm64 world CPUs are not a single device but a slice of the system.
25 There are no systems that support the physical addition (or removal) of CPUs
29 e.g. New CPUs come with new caches, but the platform's cache topology is
30 described in a static table, the PPTT. How caches are shared between CPUs is
42 CPU Hotplug on virtual systems - CPUs not enabled at boot
50 CPU Hotplug as all resources are described as ``present``, but CPUs may be
53 single CPU, and additional CPUs are added once a cloud orchestrator deploys
[all …]
/linux/tools/lib/perf/include/perf/
cpumap.h
49 LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
54 * the result is the number CPUs in the map plus one if the
57 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
87 #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
88 for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
89 (idx) < perf_cpu_map__nr(cpus); \
90 (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
92 #define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus) \
93 for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \
94 (idx) < perf_cpu_map__nr(cpus); \
73 perf_cpu_map__for_each_cpu(cpu,idx,cpus) global() argument
78 perf_cpu_map__for_each_cpu_skip_any(_cpu,idx,cpus) global() argument
84 perf_cpu_map__for_each_idx(idx,cpus) global() argument
[all...]
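
Taken together, the cpumap.h declarations above give the basic libperf CPU-map workflow: create a map, ask for its size, iterate it with perf_cpu_map__for_each_cpu(), and drop the reference with perf_cpu_map__put(). A minimal user-space sketch (assuming the libperf headers are installed and the program is linked with -lperf; error handling abbreviated):

    #include <perf/cpumap.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
        struct perf_cpu cpu;
        int idx;

        if (!cpus)
            return 1;

        printf("%d online cpus:", perf_cpu_map__nr(cpus));
        perf_cpu_map__for_each_cpu(cpu, idx, cpus)
            printf(" %d", cpu.cpu);
        printf("\n");

        perf_cpu_map__put(cpus);    /* drop the reference taken at creation */
        return 0;
    }
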
/linux/Documentation/scheduler/
sched-energy.rst
9 the impact of its decisions on the energy consumed by CPUs. EAS relies on an
10 Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
59 In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
64 knowledge about the platform's topology, which include the 'capacity' of CPUs,
72 differentiate CPUs with different computing throughput. The 'capacity' of a CPU
76 tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
79 energy trade-offs. The capacity of CPUs is provided via arch-specific code
99 Let us consider a platform with 12 CPUs, split in 3 performance domains
102 CPUs: 0 1 2 3 4 5 6 7 8 9 10 11
108 containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
[all …]
sched-domains.rst
10 Each scheduling domain spans a number of CPUs (stored in the ->span field).
13 i. The top domain for each CPU will generally span all CPUs in the system
15 CPUs will never be given tasks to run unless the CPUs allowed mask is
17 CPUs".
23 to which the domain belongs. Groups may be shared among CPUs as they contain
27 shared between CPUs.
31 load of each of its member CPUs, and only when the load of a group becomes
49 If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
62 In SMP, the parent of the base domain will span all physical CPUs in the
/linux/drivers/cpufreq/
cpufreq-dt.c
30 cpumask_var_t cpus; member
50 if (cpumask_test_cpu(cpu, priv->cpus)) in cpufreq_dt_find_data()
115 cpumask_copy(policy->cpus, priv->cpus); in cpufreq_init()
196 if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL)) in dt_cpufreq_early_init()
199 cpumask_set_cpu(cpu, priv->cpus); in dt_cpufreq_early_init()
217 ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus); in dt_cpufreq_early_init()
223 * operating-points-v2 not supported, fallback to all CPUs share in dt_cpufreq_early_init()
225 * sharing CPUs. in dt_cpufreq_early_init()
227 if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) in dt_cpufreq_early_init()
232 * Initialize OPP tables for all priv->cpus. They will be shared by in dt_cpufreq_early_init()
[all …]
/linux/Documentation/power/
suspend-and-cpuhotplug.rst
27 |tasks | | cpus | | | | cpus | |tasks|
59 online CPUs
75 Note down these cpus in | P
100 | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
158 the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
177 update on the CPUs, as discussed below:
184 a. When all the CPUs are identical:
187 to apply the same microcode revision to each of the CPUs.
192 all CPUs, in order to handle case 'b' described below.
195 b. When some of the CPUs are different than the rest:
[all …]
/linux/tools/perf/tests/
openat-syscall-all-cpus.c
27 struct perf_cpu_map *cpus; in test__openat_syscall_event_on_all_cpus() local
40 cpus = perf_cpu_map__new_online_cpus(); in test__openat_syscall_event_on_all_cpus()
41 if (cpus == NULL) { in test__openat_syscall_event_on_all_cpus()
56 if (evsel__open(evsel, cpus, threads) < 0) { in test__openat_syscall_event_on_all_cpus()
64 perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in test__openat_syscall_event_on_all_cpus()
69 * without CPU_ALLOC. 1024 cpus in 2010 still seems in test__openat_syscall_event_on_all_cpus()
91 evsel->core.cpus = perf_cpu_map__get(cpus); in test__openat_syscall_event_on_all_cpus()
95 perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in test__openat_syscall_event_on_all_cpus()
121 perf_cpu_map__put(cpus); in test__openat_syscall_event_on_all_cpus()
129 TEST_CASE_REASON("Detect openat syscall event on all cpus",
[all …]
/linux/Documentation/devicetree/bindings/csky/
cpus.txt
5 The device tree allows to describe the layout of CPUs in a system through
6 the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
9 Only SMP system need to care about the cpus node and single processor
10 needn't define cpus node at all.
13 cpus and cpu node bindings definition
16 - cpus node
20 The node name must be "cpus".
22 A cpus node must define the following properties:
59 cpus {
/linux/sound/soc/fsl/
Kconfig
2 menu "SoC Audio for Freescale CPUs"
4 comment "Common SoC Audio options for Freescale CPUs:"
16 support for the Freescale CPUs.
28 support for the Freescale CPUs.
39 support for the Freescale CPUs.
48 support for the NXP iMX CPUs.
57 support for the Freescale CPUs.
70 support for the Freescale CPUs.
80 (ESAI) support for the Freescale CPUs.
113 iMX CPUs. XCVR is a digital module that supports HDMI2.1 eARC,
[all …]
/linux/Documentation/arch/arm/
cluster-pm-race-avoidance.rst
18 In a system containing multiple CPUs, it is desirable to have the
19 ability to turn off individual CPUs when the system is idle, reducing
22 In a system containing multiple clusters of CPUs, it is also desirable
27 of independently running CPUs, while the OS continues to run. This
92 CPUs in the cluster simultaneously modifying the state. The cluster-
104 referred to as a "CPU". CPUs are assumed to be single-threaded:
107 This means that CPUs fit the basic model closely.
216 A cluster is a group of connected CPUs with some common resources.
217 Because a cluster contains multiple CPUs, it can be doing multiple
272 which exact CPUs within the cluster play these roles. This must
[all …]
/linux/kernel/
stop_machine.c
28 * be shared by works on different cpus.
36 /* the actual stopper, one per every possible cpu, enabled on online cpus */
128 * partially or fully on different cpus. @fn should either be ready
248 * At this stage all other CPUs we depend on must spin in multi_cpu_stop()
324 * stop_two_cpus - stops two cpus
436 * stop_cpus - stop multiple cpus
437 * @cpumask: cpus to stop
441 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
446 * This function doesn't guarantee the cpus in @cpumask stay online
447 * till @fn completes. If some cpus g
587 stop_machine_cpuslocked(cpu_stop_fn_t fn,void * data,const struct cpumask * cpus) stop_machine_cpuslocked() argument
622 stop_machine(cpu_stop_fn_t fn,void * data,const struct cpumask * cpus) stop_machine() argument
678 stop_machine_from_inactive_cpu(cpu_stop_fn_t fn,void * data,const struct cpumask * cpus) stop_machine_from_inactive_cpu() argument
[all...]
/linux/tools/testing/selftests/rcutorture/bin/
jitter.sh
4 # Alternate sleeping and spinning on randomly selected CPUs. The purpose
58 if cpus=`grep 1 /sys/devices/system/cpu/*/online 2>&1 |
63 cpus=
65 # Do not leave out non-hot-pluggable CPUs
66 cpus="$cpus $nohotplugcpus"
68 cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
70 ncpus = split(cpus, ca);
/linux/tools/perf/util/
perf_api_probe.c
63 struct perf_cpu_map *cpus; in perf_probe_api() local
67 cpus = perf_cpu_map__new_online_cpus(); in perf_probe_api()
68 if (!cpus) in perf_probe_api()
70 cpu = perf_cpu_map__cpu(cpus, 0); in perf_probe_api()
71 perf_cpu_map__put(cpus); in perf_probe_api()
139 struct perf_cpu_map *cpus; in perf_can_record_cpu_wide() local
143 cpus = perf_cpu_map__new_online_cpus(); in perf_can_record_cpu_wide()
144 if (!cpus) in perf_can_record_cpu_wide()
147 cpu = perf_cpu_map__cpu(cpus, 0); in perf_can_record_cpu_wide()
148 perf_cpu_map__put(cpus); in perf_can_record_cpu_wide()
/linux/Documentation/admin-guide/
cputopology.rst
61 offline: CPUs that are not online because they have been
62 HOTPLUGGED off or exceed the limit of CPUs allowed by the
64 [~cpu_online_mask + cpus >= NR_CPUS]
66 online: CPUs that are online and being scheduled [cpu_online_mask]
68 possible: CPUs that have been allocated resources and can be
71 present: CPUs that have been identified as being present in the
78 In this example, there are 64 CPUs in the system but cpus 32-63 exceed
80 being 32. Note also that CPUs 2 and 4-31 are not online but could be
90 started with possible_cpus=144. There are 4 CPUs in the system and cpu2
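
The masks described in cputopology.rst are exported as cpulist-format files under /sys/devices/system/cpu/. A small illustrative C program (not part of the documentation) that dumps the four masks named above:

    #include <stdio.h>

    int main(void)
    {
        static const char * const files[] = {
            "/sys/devices/system/cpu/online",
            "/sys/devices/system/cpu/offline",
            "/sys/devices/system/cpu/possible",
            "/sys/devices/system/cpu/present",
        };
        char buf[256];

        for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
            FILE *f = fopen(files[i], "r");

            /* Each file holds one cpulist line, e.g. "0-31"; fgets keeps the newline. */
            if (!f || !fgets(buf, sizeof(buf), f))
                buf[0] = '\0';
            printf("%s: %s", files[i], buf[0] ? buf : "(unreadable)\n");
            if (f)
                fclose(f);
        }
        return 0;
    }
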
