/linux/tools/testing/selftests/riscv/hwprobe/

which-cpus.c
   22  "which-cpus: [-h] [<key=value> [<key=value> ...]]\n\n"   in help()
   25  " <key=value>, outputs the cpulist for cpus which all match the given set\n"   in help()
   29  static void print_cpulist(cpu_set_t *cpus)   in print_cpulist() argument
   33  if (!CPU_COUNT(cpus)) {   in print_cpulist()
   34  printf("cpus: None\n");   in print_cpulist()
   38  printf("cpus:");   in print_cpulist()
   39  for (int i = 0, c = 0; i < CPU_COUNT(cpus); i++, c++) {   in print_cpulist()
   40  if (start != end && !CPU_ISSET(c, cpus))   in print_cpulist()
   43  while (!CPU_ISSET(c, cpus))   in print_cpulist()
   59  static void do_which_cpus(int argc, char **argv, cpu_set_t *cpus)   in do_which_cpus() argument
   [all …]
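For readers unfamiliar with the cpu_set_t macros this selftest leans on, here is a minimal, self-contained sketch (glibc <sched.h>; my own illustration, not the selftest's print_cpulist()) that reads the current affinity mask and prints it as a cpulist by collapsing consecutive CPUs into ranges:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Print a cpu_set_t as a cpulist such as "0-3,6". */
    static void print_cpulist(const cpu_set_t *cpus)
    {
        int first = 1;

        printf("cpus:");
        for (int c = 0; c < CPU_SETSIZE; c++) {
            if (!CPU_ISSET(c, cpus))
                continue;

            int start = c;

            /* Extend the run while the next CPU is also in the set. */
            while (c + 1 < CPU_SETSIZE && CPU_ISSET(c + 1, cpus))
                c++;

            printf("%s%d", first ? " " : ",", start);
            if (c > start)
                printf("-%d", c);
            first = 0;
        }
        printf(first ? " None\n" : "\n");
    }

    int main(void)
    {
        cpu_set_t cpus;

        /* The mask returned here is already constrained by affinity/cpusets. */
        if (sched_getaffinity(0, sizeof(cpus), &cpus))
            return 1;
        print_cpulist(&cpus);
        return 0;
    }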
cbo.c
    6  * subset of cpus, as well as only executing the tests on those cpus.
   93  cpu_set_t *cpus = (cpu_set_t *)arg;   in test_zicbom() local
   97  rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0);   in test_zicbom()
  117  cpu_set_t *cpus = (cpu_set_t *)arg;   in test_zicboz() local
  122  rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0);   in test_zicboz()
  160  static void check_no_zicbo_cpus(cpu_set_t *cpus, __u64 cbo)   in check_no_zicbo_cpus() argument
  170  while (i++ < CPU_COUNT(cpus)) {   in check_no_zicbo_cpus()
  171  while (!CPU_ISSET(c, cpus))   in check_no_zicbo_cpus()
  219  cpu_set_t cpus;   in main() local
  231  rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus);   in main()
   [all …]
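The riscv_hwprobe() helper seen above wraps the RISC-V hwprobe system call. Below is a rough sketch of invoking it directly; it assumes a RISC-V target whose kernel headers are new enough to provide <asm/hwprobe.h> and __NR_riscv_hwprobe, and the wrapper name and key choice are illustrative rather than taken from the selftest:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>   /* struct riscv_hwprobe, RISCV_HWPROBE_* keys */

    static long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
                              size_t cpusetsize, unsigned long *cpus,
                              unsigned int flags)
    {
        return syscall(__NR_riscv_hwprobe, pairs, pair_count,
                       cpusetsize, cpus, flags);
    }

    int main(void)
    {
        struct riscv_hwprobe pair = {
            .key = RISCV_HWPROBE_KEY_IMA_EXT_0,
        };

        /*
         * cpusetsize == 0 and cpus == NULL should query all online CPUs
         * (hedged: per my reading of Documentation/arch/riscv/hwprobe.rst).
         */
        if (riscv_hwprobe(&pair, 1, 0, NULL, 0)) {
            perror("riscv_hwprobe");
            return 1;
        }

        printf("IMA_EXT_0 bitmap: 0x%llx\n", (unsigned long long)pair.value);
        return 0;
    }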
/linux/arch/riscv/kernel/

sys_hwprobe.c
   24  const struct cpumask *cpus)   in hwprobe_arch_id() argument
   30  for_each_cpu(cpu, cpus) {   in hwprobe_arch_id()
   64  const struct cpumask *cpus)   in hwprobe_isa_ext0() argument
   83  for_each_cpu(cpu, cpus) {   in hwprobe_isa_ext0()
  174  static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)   in hwprobe_ext0_has() argument
  178  hwprobe_isa_ext0(&pair, cpus);   in hwprobe_ext0_has()
  183  static u64 hwprobe_misaligned(const struct cpumask *cpus)   in hwprobe_misaligned() argument
  188  for_each_cpu(cpu, cpus) {   in hwprobe_misaligned()
  206  static u64 hwprobe_misaligned(const struct cpumask *cpus)   in hwprobe_misaligned() argument
  219  static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)   in hwprobe_vec_misaligned() argument
   [all …]
/linux/drivers/cpuidle/

coupled.c
    3  * coupled.c - helper functions to enter the same idle state on multiple cpus
   24  * cpus cannot be independently powered down, either due to
   31  * shared between the cpus (L2 cache, interrupt controller, and
   33  * be tightly controlled on both cpus.
   36  * WFI state until all cpus are ready to enter a coupled state, at
   38  * cpus at approximately the same time.
   40  * Once all cpus are ready to enter idle, they are woken by an smp
   42  * cpus will find work to do, and choose not to enter idle. A
   43  * final pass is needed to guarantee that all cpus will call the
   46  * ready counter matches the number of online coupled cpus. If any
   [all …]
/linux/Documentation/admin-guide/cgroup-v1/

cpusets.rst
   31  2.2 Adding/removing cpus
   43  Cpusets provide a mechanism for assigning a set of CPUs and Memory
   57  include CPUs in its CPU affinity mask, and using the mbind(2) and
   60  CPUs or Memory Nodes not in that cpuset. The scheduler will not
   67  cpusets and which CPUs and Memory Nodes are assigned to each cpuset,
   75  The management of large computer systems, with many processors (CPUs),
  113  Cpusets provide a Linux kernel mechanism to constrain which CPUs and
  117  CPUs a task may be scheduled (sched_setaffinity) and on which Memory
  122  - Cpusets are sets of allowed CPUs and Memory Nodes, known to the
  126  - Calls to sched_setaffinity are filtered to just those CPUs
   [all …]
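As a concrete illustration of the sched_setaffinity filtering mentioned in the excerpt, the sketch below (my example, not from the document) requests a single CPU and then reads back what the kernel actually granted; inside a cpuset the result never extends beyond the cpuset's allowed CPUs:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t want, got;

        /* Ask to run only on CPU 0 ... */
        CPU_ZERO(&want);
        CPU_SET(0, &want);
        if (sched_setaffinity(0, sizeof(want), &want))
            perror("sched_setaffinity");   /* fails if CPU 0 is outside our cpuset */

        /* ... then read back the mask the kernel actually allows. */
        if (sched_getaffinity(0, sizeof(got), &got))
            return 1;
        printf("allowed CPUs after request: %d\n", CPU_COUNT(&got));
        return 0;
    }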
/linux/Documentation/timers/

no_hz.rst
   19  2. Omit scheduling-clock ticks on idle CPUs (CONFIG_NO_HZ_IDLE=y or
   23  3. Omit scheduling-clock ticks on CPUs that are either idle or that
   65  Omit Scheduling-Clock Ticks For Idle CPUs
   78  scheduling-clock interrupts to idle CPUs, which is critically important
   86  idle CPUs. That said, dyntick-idle mode is not free:
  104  Omit Scheduling-Clock Ticks For CPUs With Only One Runnable Task
  109  Note that omitting scheduling-clock ticks for CPUs with only one runnable
  110  task implies also omitting them for idle CPUs.
  113  sending scheduling-clock interrupts to CPUs with a single runnable task,
  114  and such CPUs are said to be "adaptive-ticks CPUs". This is important
   [all …]
/linux/sound/soc/intel/boards/

sof_board_helpers.c
  182  struct snd_soc_dai_link_component *cpus;   in set_ssp_codec_link() local
  192  /* cpus */   in set_ssp_codec_link()
  193  cpus = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link_component),   in set_ssp_codec_link()
  195  if (!cpus)   in set_ssp_codec_link()
  200  cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssp%d-port",   in set_ssp_codec_link()
  203  cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin",   in set_ssp_codec_link()
  206  if (!cpus->dai_name)   in set_ssp_codec_link()
  209  link->cpus = cpus;   in set_ssp_codec_link()
  227  struct snd_soc_dai_link_component *cpus;   in set_dmic_link() local
  229  /* cpus */   in set_dmic_link()
   [all …]

sof_pcm512x.c
  225  struct snd_soc_dai_link_component *cpus;   in sof_card_dai_links_create() local
  231  cpus = devm_kcalloc(dev, sof_audio_card_pcm512x.num_links,   in sof_card_dai_links_create()
  233  if (!links || !cpus)   in sof_card_dai_links_create()
  255  links[id].cpus = &cpus[id];   in sof_card_dai_links_create()
  258  links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,   in sof_card_dai_links_create()
  261  if (!links[id].cpus->dai_name)   in sof_card_dai_links_create()
  264  links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,   in sof_card_dai_links_create()
  267  if (!links[id].cpus->dai_name)   in sof_card_dai_links_create()
  276  links[id].cpus = &cpus[id];   in sof_card_dai_links_create()
  277  links[id].cpus->dai_name = "DMIC01 Pin";   in sof_card_dai_links_create()
   [all …]
/linux/tools/testing/selftests/cgroup/

test_cpuset_prs.sh
   25  SUBPARTS_CPUS=$CGROUP2/.__DEBUG__.cpuset.cpus.subpartitions
   26  CPULIST=$(cat $CGROUP2/cpuset.cpus.effective)
   29  [[ $NR_CPUS -lt 8 ]] && skip_test "Test needs at least 8 cpus available!"
   79  echo 0-6 > test/cpuset.cpus
   80  echo root > test/cpuset.cpus.partition
   81  cat test/cpuset.cpus.partition | grep -q invalid
   83  echo member > test/cpuset.cpus.partition
   84  echo "" > test/cpuset.cpus
   88  # If isolated CPUs have been reserved at boot time (as shown in
   89  # cpuset.cpus.isolated), these isolated CPUs should be outside of CPUs 0-8
   [all …]
/linux/include/linux/

stop_machine.h
   13  * function to be executed on a single or multiple cpus preempting all
   14  * other processes and monopolizing those cpus until it finishes.
   18  * cpus are online.
   99  * stop_machine: freeze the machine on all CPUs and run this function
  102  * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  114  int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
  117  * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
  120  * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  125  int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
  133  * Same as above, but instead of every CPU, only the logical CPUs of a
   [all …]
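To show how the stop_machine() interface documented above is typically called, here is a hedged, kernel-module-style sketch. It is my own illustration (the callback name and message are made up), it assumes stop_machine() is exported to modules, and the callback must not sleep because it runs with every online CPU stopped:

    #include <linux/module.h>
    #include <linux/stop_machine.h>

    /* Runs while all other CPUs spin with interrupts disabled; must not sleep. */
    static int quiesce_fn(void *data)
    {
        pr_info("stop_machine callback running, data=%p\n", data);
        return 0;
    }

    static int __init stopm_example_init(void)
    {
        /* NULL cpumask: the callback may run on any one online CPU. */
        return stop_machine(quiesce_fn, NULL, NULL);
    }

    static void __exit stopm_example_exit(void)
    {
    }

    module_init(stopm_example_init);
    module_exit(stopm_example_exit);
    MODULE_LICENSE("GPL");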
energy_model.h
   61  * @cpus: Cpumask covering the CPUs of the domain. It's here   member
   66  * In case of CPU device, a "performance domain" represents a group of CPUs
   67  * whose performance is scaled together. All CPUs of a performance domain
   69  * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus
   78  unsigned long cpus[];   member
   97  #define em_span_cpus(em) (to_cpumask((em)->cpus))
  105  * maximum CPUs in such domain to 64.
  111  * limits to number of CPUs in the Perf. Domain.
  134  * In case of CPUs, the power is the one of a single CPU in the domain,
  151  * In case of CPUs, the cost is the one of a single CPU in the domain.
   [all …]
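A hedged kernel-side sketch of how the em_span_cpus() helper above might be used. This is illustrative only; it assumes a kernel built with CONFIG_ENERGY_MODEL, that an Energy Model has been registered for CPU 0, and that em_cpu_get() (declared in this header) is usable from module code:

    #include <linux/cpumask.h>
    #include <linux/energy_model.h>
    #include <linux/module.h>

    static int __init em_dump_init(void)
    {
        struct em_perf_domain *pd;

        /* Look up the performance domain CPU 0 belongs to, if any. */
        pd = em_cpu_get(0);
        if (!pd) {
            pr_info("no energy model registered for CPU 0\n");
            return 0;
        }

        /* em_span_cpus() turns the embedded bitmap into a struct cpumask. */
        pr_info("CPU 0 shares a perf domain with CPUs %*pbl\n",
                cpumask_pr_args(em_span_cpus(pd)));
        return 0;
    }

    static void __exit em_dump_exit(void)
    {
    }

    module_init(em_dump_init);
    module_exit(em_dump_exit);
    MODULE_LICENSE("GPL");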
/linux/tools/lib/perf/tests/

test-cpumap.c
   16  struct perf_cpu_map *cpus;   in test_cpumap() local
   24  cpus = perf_cpu_map__new_any_cpu();   in test_cpumap()
   25  if (!cpus)   in test_cpumap()
   28  perf_cpu_map__get(cpus);   in test_cpumap()
   29  perf_cpu_map__put(cpus);   in test_cpumap()
   30  perf_cpu_map__put(cpus);   in test_cpumap()
   32  cpus = perf_cpu_map__new_online_cpus();   in test_cpumap()
   33  if (!cpus)   in test_cpumap()
   36  perf_cpu_map__for_each_cpu(cpu, idx, cpus)   in test_cpumap()
   39  perf_cpu_map__put(cpus);   in test_cpumap()
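For context, a small sketch of the libperf cpumap API this test exercises (assumes libperf's <perf/cpumap.h> is installed and the program is linked with -lperf; my example, not the test itself):

    #include <stdio.h>
    #include <perf/cpumap.h>

    int main(void)
    {
        struct perf_cpu_map *cpus;
        struct perf_cpu cpu;
        int idx;

        /* Build a map covering all CPUs currently online. */
        cpus = perf_cpu_map__new_online_cpus();
        if (!cpus)
            return 1;

        printf("%d online CPUs:", perf_cpu_map__nr(cpus));
        perf_cpu_map__for_each_cpu(cpu, idx, cpus)
            printf(" %d", cpu.cpu);
        printf("\n");

        /* Maps are reference counted; drop our reference. */
        perf_cpu_map__put(cpus);
        return 0;
    }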
test-evlist.c
   36  struct perf_cpu_map *cpus;   in test_stat_cpu() local
   49  cpus = perf_cpu_map__new_online_cpus();   in test_stat_cpu()
   50  __T("failed to create cpus", cpus);   in test_stat_cpu()
   69  perf_evlist__set_maps(evlist, cpus, NULL);   in test_stat_cpu()
   75  cpus = perf_evsel__cpus(evsel);   in test_stat_cpu()
   77  for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {   in test_stat_cpu()
   88  perf_cpu_map__put(cpus);   in test_stat_cpu()
  218  struct perf_cpu_map *cpus;   in test_mmap_thread() local
  264  cpus = perf_cpu_map__new_any_cpu();   in test_mmap_thread()
  265  __T("failed to create cpus", cpus);   in test_mmap_thread()
   [all …]
/linux/drivers/clk/sunxi/

clk-sun9i-cpus.c
    7  * Allwinner A80 CPUS clock driver
   52  struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);   in sun9i_a80_cpus_clk_recalc_rate() local
   57  reg = readl(cpus->reg);   in sun9i_a80_cpus_clk_recalc_rate()
  152  struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);   in sun9i_a80_cpus_clk_set_rate() local
  159  reg = readl(cpus->reg);   in sun9i_a80_cpus_clk_set_rate()
  167  writel(reg, cpus->reg);   in sun9i_a80_cpus_clk_set_rate()
  181  * sun9i_a80_cpus_setup() - Setup function for a80 cpus composite clk
  189  struct sun9i_a80_cpus_clk *cpus;   in sun9i_a80_cpus_setup() local
  194  cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);   in sun9i_a80_cpus_setup()
  195  if (!cpus)   in sun9i_a80_cpus_setup()
   [all …]
/linux/Documentation/arch/arm64/

cpu-hotplug.rst
    9  CPUs online/offline using PSCI. This document is about ACPI firmware allowing
   10  CPUs that were not available during boot to be added to the system later.
   15  CPU Hotplug on physical systems - CPUs not present at boot
   24  In the arm64 world CPUs are not a single device but a slice of the system.
   25  There are no systems that support the physical addition (or removal) of CPUs
   29  e.g. New CPUs come with new caches, but the platform's cache topology is
   30  described in a static table, the PPTT. How caches are shared between CPUs is
   42  CPU Hotplug on virtual systems - CPUs not enabled at boot
   50  CPU Hotplug as all resources are described as ``present``, but CPUs may be
   53  single CPU, and additional CPUs are added once a cloud orchestrator deploys
   [all …]

booting.rst
  196  be programmed with a consistent value on all CPUs. If entering the
  202  All CPUs to be booted by the kernel must be part of the same coherency
  217  - SCR_EL3.FIQ must have the same value across all CPUs the kernel is
  232  all CPUs the kernel is executing on, and must stay constant
  255  For CPUs with pointer authentication functionality:
  267  For CPUs with Activity Monitors Unit v1 (AMUv1) extension present:
  285  For CPUs with the Fine Grained Traps (FEAT_FGT) extension present:
  291  For CPUs with the Fine Grained Traps 2 (FEAT_FGT2) extension present:
  297  For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
  303  For CPUs with Advanced SIMD and floating point support:
   [all …]
/linux/Documentation/scheduler/

sched-energy.rst
    9  the impact of its decisions on the energy consumed by CPUs. EAS relies on an
   10  Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
   59  In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
   64  knowledge about the platform's topology, which include the 'capacity' of CPUs,
   72  differentiate CPUs with different computing throughput. The 'capacity' of a CPU
   76  tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
   79  energy trade-offs. The capacity of CPUs is provided via arch-specific code
   99  Let us consider a platform with 12 CPUs, split in 3 performance domains
  102  CPUs: 0 1 2 3 4 5 6 7 8 9 10 11
  108  containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
   [all …]
/linux/Documentation/power/

suspend-and-cpuhotplug.rst
   27  |tasks | | cpus | | | | cpus | |tasks|
   59  online CPUs
   75  Note down these cpus in | P
  100  | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
  158  the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
  177  update on the CPUs, as discussed below:
  184  a. When all the CPUs are identical:
  187  to apply the same microcode revision to each of the CPUs.
  192  all CPUs, in order to handle case 'b' described below.
  195  b. When some of the CPUs are different than the rest:
   [all …]
/linux/tools/perf/tests/

openat-syscall-all-cpus.c
   27  struct perf_cpu_map *cpus;   in test__openat_syscall_event_on_all_cpus() local
   40  cpus = perf_cpu_map__new_online_cpus();   in test__openat_syscall_event_on_all_cpus()
   41  if (cpus == NULL) {   in test__openat_syscall_event_on_all_cpus()
   56  if (evsel__open(evsel, cpus, threads) < 0) {   in test__openat_syscall_event_on_all_cpus()
   64  perf_cpu_map__for_each_cpu(cpu, idx, cpus) {   in test__openat_syscall_event_on_all_cpus()
   69  * without CPU_ALLOC. 1024 cpus in 2010 still seems   in test__openat_syscall_event_on_all_cpus()
   91  evsel->core.cpus = perf_cpu_map__get(cpus);   in test__openat_syscall_event_on_all_cpus()
   95  perf_cpu_map__for_each_cpu(cpu, idx, cpus) {   in test__openat_syscall_event_on_all_cpus()
  121  perf_cpu_map__put(cpus);   in test__openat_syscall_event_on_all_cpus()
  129  TEST_CASE_REASON("Detect openat syscall event on all cpus",
   [all …]
/linux/Documentation/devicetree/bindings/csky/

cpus.txt
    5  The device tree allows to describe the layout of CPUs in a system through
    6  the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
    9  Only SMP system need to care about the cpus node and single processor
   10  needn't define cpus node at all.
   13  cpus and cpu node bindings definition
   16  - cpus node
   20  The node name must be "cpus".
   22  A cpus node must define the following properties:
   59  cpus {
/linux/sound/soc/fsl/

Kconfig
    2  menu "SoC Audio for Freescale CPUs"
    4  comment "Common SoC Audio options for Freescale CPUs:"
   16  support for the Freescale CPUs.
   28  support for the Freescale CPUs.
   39  support for the Freescale CPUs.
   48  support for the NXP iMX CPUs.
   57  support for the Freescale CPUs.
   70  support for the Freescale CPUs.
   80  (ESAI) support for the Freescale CPUs.
  113  iMX CPUs. XCVR is a digital module that supports HDMI2.1 eARC,
   [all …]
/linux/Documentation/arch/arm/

cluster-pm-race-avoidance.rst
   18  In a system containing multiple CPUs, it is desirable to have the
   19  ability to turn off individual CPUs when the system is idle, reducing
   22  In a system containing multiple clusters of CPUs, it is also desirable
   27  of independently running CPUs, while the OS continues to run. This
   92  CPUs in the cluster simultaneously modifying the state. The cluster-
  104  referred to as a "CPU". CPUs are assumed to be single-threaded:
  107  This means that CPUs fit the basic model closely.
  216  A cluster is a group of connected CPUs with some common resources.
  217  Because a cluster contains multiple CPUs, it can be doing multiple
  272  which exact CPUs within the cluster play these roles. This must
   [all …]
/linux/tools/testing/selftests/rcutorture/bin/

jitter.sh
    4  # Alternate sleeping and spinning on randomly selected CPUs. The purpose
   58  if cpus=`grep 1 /sys/devices/system/cpu/*/online 2>&1 |
   63  cpus=
   65  # Do not leave out non-hot-pluggable CPUs
   66  cpus="$cpus $nohotplugcpus"
   68  cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
   70  ncpus = split(cpus, ca);
/linux/tools/perf/util/

perf_api_probe.c
   63  struct perf_cpu_map *cpus;   in perf_probe_api() local
   67  cpus = perf_cpu_map__new_online_cpus();   in perf_probe_api()
   68  if (!cpus)   in perf_probe_api()
   70  cpu = perf_cpu_map__cpu(cpus, 0);   in perf_probe_api()
   71  perf_cpu_map__put(cpus);   in perf_probe_api()
  139  struct perf_cpu_map *cpus;   in perf_can_record_cpu_wide() local
  143  cpus = perf_cpu_map__new_online_cpus();   in perf_can_record_cpu_wide()
  144  if (!cpus)   in perf_can_record_cpu_wide()
  147  cpu = perf_cpu_map__cpu(cpus, 0);   in perf_can_record_cpu_wide()
  148  perf_cpu_map__put(cpus);   in perf_can_record_cpu_wide()
/linux/Documentation/admin-guide/

cputopology.rst
   61  offline: CPUs that are not online because they have been
   62  HOTPLUGGED off or exceed the limit of CPUs allowed by the
   64  [~cpu_online_mask + cpus >= NR_CPUS]
   66  online: CPUs that are online and being scheduled [cpu_online_mask]
   68  possible: CPUs that have been allocated resources and can be
   71  present: CPUs that have been identified as being present in the
   78  In this example, there are 64 CPUs in the system but cpus 32-63 exceed
   80  being 32. Note also that CPUs 2 and 4-31 are not online but could be
   90  started with possible_cpus=144. There are 4 CPUs in the system and cpu2
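The CPU masks this document describes are exported as plain-text cpulist files under /sys/devices/system/cpu. A minimal sketch of reading them from C (my example; the document itself only describes the files):

    #include <stdio.h>

    /* Print one of the cpulist-format mask files described above. */
    static void print_mask(const char *name)
    {
        char path[128], buf[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/devices/system/cpu/%s", name);
        f = fopen(path, "r");
        if (!f)
            return;
        if (fgets(buf, sizeof(buf), f))
            printf("%-9s %s", name, buf);   /* file contents end with '\n' */
        fclose(f);
    }

    int main(void)
    {
        print_mask("online");
        print_mask("offline");
        print_mask("possible");
        print_mask("present");
        return 0;
    }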