/linux/drivers/cpufreq/

  intel_pstate.c
    228  int cpu;
    283  int (*get_max)(int cpu);
    284  int (*get_max_physical)(int cpu);
    285  int (*get_min)(int cpu);
    286  int (*get_turbo)(int cpu);
    288  int (*get_cpu_scaling)(int cpu);
    356  static void intel_pstate_set_itmt_prio(int cpu)
    362  ret = cppc_get_perf_caps(cpu, &cppc_perf);
    371  cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
    378  sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
    [all …]

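The intel_pstate matches above show the driver's table of per-CPU capability callbacks (get_max, get_min, get_turbo, ...) plus the ITMT priority hook. As a rough, standalone C sketch of that callback-table pattern (all names below are hypothetical stand-ins, not the driver's own):

    #include <stdio.h>

    /* Hypothetical analogue of the driver's callback table: each hardware
     * backend fills in its own per-CPU capability queries. */
    struct perf_funcs {
        int (*get_max)(int cpu);
        int (*get_min)(int cpu);
    };

    static int demo_get_max(int cpu) { (void)cpu; return 3600; }
    static int demo_get_min(int cpu) { (void)cpu; return 800; }

    static const struct perf_funcs funcs = {
        .get_max = demo_get_max,
        .get_min = demo_get_min,
    };

    int main(void)
    {
        int cpu = 0;

        /* Core code calls through the table, so one code path can serve
         * several backends (MSR, CPPC, ...). */
        printf("cpu%d: min %d max %d\n", cpu,
               funcs.get_min(cpu), funcs.get_max(cpu));
        return 0;
    }
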
/linux/arch/arm/boot/dts/intel/axm/

  axm5516-cpus.dtsi
    13  cpu-map {
    16  cpu = <&CPU0>;
    19  cpu = <&CPU1>;
    22  cpu = <&CPU2>;
    25  cpu = <&CPU3>;
    30  cpu = <&CPU4>;
    33  cpu = <&CPU5>;
    36  cpu = <&CPU6>;
    39  cpu = <&CPU7>;
    44  cpu = <&CPU8>;
    [all …]

/linux/arch/powerpc/kernel/

  smp.c
    278  void smp_muxed_ipi_set_message(int cpu, int msg)
    280  struct cpu_messages *info = &per_cpu(ipi_message, cpu);
    290  void smp_muxed_ipi_message_pass(int cpu, int msg)
    292  smp_muxed_ipi_set_message(cpu, msg);
    298  smp_ops->cause_ipi(cpu);
    352  static inline void do_message_pass(int cpu, int msg)
    355  smp_ops->message_pass(cpu, msg);
    358  smp_muxed_ipi_message_pass(cpu, msg);
    362  void arch_smp_send_reschedule(int cpu)
    365  do_message_pass(cpu, PPC_MSG_RESCHEDULE);
    [all …]

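The powerpc matches show the muxed-IPI scheme: message bits accumulate in a per-CPU word, a single doorbell interrupt is raised, and the handler drains every pending message at once. A userspace analogue using C11 atomics (the kernel uses its own primitives; this only sketches the idea):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MSG_RESCHEDULE 0
    #define MSG_CALL_FUNC  1

    /* One message word per CPU; bits accumulate until the handler drains them. */
    static _Atomic unsigned long ipi_message[4];

    static void muxed_ipi_set_message(int cpu, int msg)
    {
        atomic_fetch_or(&ipi_message[cpu], 1UL << msg);
        /* real code would now call smp_ops->cause_ipi(cpu) exactly once */
    }

    static void muxed_ipi_handler(int cpu)
    {
        /* Atomically take all pending messages, then handle each set bit. */
        unsigned long msgs = atomic_exchange(&ipi_message[cpu], 0);

        while (msgs) {
            int msg = __builtin_ctzl(msgs);
            msgs &= msgs - 1;   /* clear lowest set bit */
            printf("cpu%d: handling message %d\n", cpu, msg);
        }
    }

    int main(void)
    {
        muxed_ipi_set_message(1, MSG_RESCHEDULE);
        muxed_ipi_set_message(1, MSG_CALL_FUNC);
        muxed_ipi_handler(1);   /* one "interrupt" drains both */
        return 0;
    }
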
  tau_6xx.c
    55  static void set_thresholds(unsigned long cpu)
    60  mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
    63  mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
    66  static void TAUupdate(int cpu)
    77  if (tau[cpu].low >= step_size) {
    78  tau[cpu].low -= step_size;
    79  tau[cpu].high -= (step_size - window_expand);
    81  tau[cpu].grew = 1;
    88  if (tau[cpu].high <= 127 - step_size) {
    89  tau[cpu].low += (step_size - window_expand);
    [all …]

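TAUupdate() slides a [low, high] threshold window toward whichever bound the temperature crossed, letting it widen by window_expand as it moves. A simplified standalone sketch of the downward shift seen at lines 77-81 (the constants and the exact semantics of the grew flag are illustrative assumptions):

    #include <stdio.h>

    struct tau_window { int low, high, grew; };

    static const int step_size = 2, window_expand = 1;

    /* Temperature fell below the low threshold: slide the window down,
     * widening it by window_expand as it moves. */
    static void window_shift_down(struct tau_window *t)
    {
        if (t->low >= step_size) {
            t->low -= step_size;
            t->high -= (step_size - window_expand);
        }
        t->grew = 1;    /* window changed while tracking the reading */
    }

    int main(void)
    {
        struct tau_window t = { .low = 40, .high = 44 };

        window_shift_down(&t);
        /* was [40, 44] (width 4); now [38, 43] (width 5) */
        printf("window now [%d, %d]\n", t.low, t.high);
        return 0;
    }
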
  watchdog.c
    148  int cpu = raw_smp_processor_id();
    151  pr_emerg("CPU %d Hard LOCKUP\n", cpu);
    153  cpu, tb, per_cpu(wd_timer_tb, cpu),
    154  tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
    182  static bool set_cpu_stuck(int cpu)
    184  cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
    185  cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
    200  static void watchdog_smp_panic(int cpu)
    213  if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
    220  if (c == cpu)
    [all …]

/linux/drivers/base/

  arch_topology.c
    66  int cpu;
    77  for_each_cpu(cpu, cpus) {
    78  sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
    82  rcu_assign_pointer(per_cpu(sft_data, cpu), data);
    83  cpumask_set_cpu(cpu, &scale_freq_counters_mask);
    97  int cpu;
    101  for_each_cpu(cpu, cpus) {
    102  sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
    105  rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
    106  cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
    [all …]

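The pattern here is publishing a per-CPU pointer with rcu_assign_pointer() so readers that rcu_dereference() it never observe a half-initialised object. A module-style sketch of the same publish/retract sequence (untested, assumes a kernel build environment; all names are invented):

    #include <linux/module.h>
    #include <linux/percpu.h>
    #include <linux/rcupdate.h>
    #include <linux/cpumask.h>

    struct sf_data { int source; };

    static DEFINE_PER_CPU(struct sf_data __rcu *, demo_sft_data);
    static struct sf_data demo_counters = { .source = 1 };

    static int __init sft_demo_init(void)
    {
        int cpu;

        /* Publish: readers under rcu_read_lock() see either NULL or a
         * fully initialised object, never a half-written one. */
        for_each_online_cpu(cpu)
            rcu_assign_pointer(per_cpu(demo_sft_data, cpu), &demo_counters);
        return 0;
    }

    static void __exit sft_demo_exit(void)
    {
        int cpu;

        for_each_online_cpu(cpu)
            rcu_assign_pointer(per_cpu(demo_sft_data, cpu), NULL);
        synchronize_rcu();  /* wait out readers before the data goes away */
    }

    module_init(sft_demo_init);
    module_exit(sft_demo_exit);
    MODULE_LICENSE("GPL");
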
  cacheinfo.c
    25  #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
    26  #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
    27  #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
    28  #define per_cpu_cacheinfo_idx(cpu, idx) \
    29  (per_cpu_cacheinfo(cpu) + (idx))
    34  struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
    36  return ci_cacheinfo(cpu);
    57  bool last_level_cache_is_valid(unsigned int cpu)
    61  if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
    64  llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
    [all …]

/linux/tools/testing/selftests/cpu-hotplug/

  cpu-on-off-test.sh
    27  if ! ls $SYSFS/devices/system/cpu/cpu* > /dev/null 2>&1; then
    28  echo $msg cpu hotplug is not supported >&2
    33  online_cpus=`cat $SYSFS/devices/system/cpu/online`
    41  present_cpus=`cat $SYSFS/devices/system/cpu/present`
    47  offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
    63  for cpu in $SYSFS/devices/system/cpu/cpu*; do
    64  if [ -f $cpu/online ] && grep -q $state $cpu/online; then
    65  echo ${cpu##/*/cpu}
    82  grep -q 1 $SYSFS/devices/system/cpu/cpu$1/online
    87  grep -q 0 $SYSFS/devices/system/cpu/cpu$1/online
    [all …]

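The selftest drives hotplug entirely through sysfs by writing 0 or 1 to cpuN/online. The same can be done from C; a minimal sketch (needs root, and cpu0 is typically not hot-removable):

    #include <stdio.h>

    /* Write "0" or "1" to /sys/devices/system/cpu/cpuN/online.
     * Returns 0 on success; not every CPU is hotpluggable. */
    static int set_cpu_online(int cpu, int online)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/online", cpu);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%d\n", online);
        return fclose(f);   /* the write may be rejected at close time */
    }

    int main(void)
    {
        if (set_cpu_online(1, 0))
            perror("offline cpu1");
        if (set_cpu_online(1, 1))
            perror("online cpu1");
        return 0;
    }
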
/linux/include/linux/

  topology.h
    94  static inline int cpu_to_node(int cpu)
    96  return per_cpu(numa_node, cpu);
    108  static inline void set_cpu_numa_node(int cpu, int node)
    110  per_cpu(numa_node, cpu) = node;
    151  static inline int cpu_to_mem(int cpu)
    153  return per_cpu(_numa_mem_, cpu);
    158  static inline void set_cpu_numa_mem(int cpu, int node)
    160  per_cpu(_numa_mem_, cpu) = node;
    175  static inline int cpu_to_mem(int cpu)
    177  return cpu_to_node(cpu);
    [all …]

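cpu_to_node() is the hook NUMA-aware code uses to keep a CPU's data on that CPU's memory node; a typical use is feeding it to kmalloc_node(). A kernel-side fragment, offered only as a sketch:

    #include <linux/slab.h>
    #include <linux/topology.h>

    /* Sketch: allocate a buffer on the CPU's own NUMA node so that
     * later accesses from that CPU stay node-local. */
    static void *alloc_local_buf(int cpu, size_t size)
    {
        return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
    }
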
  cpumask.h
    132  static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
    135  WARN_ON_ONCE(cpu >= bits);
    140  static __always_inline unsigned int cpumask_check(unsigned int cpu)
    142  cpu_max_bits_warn(cpu, small_cpumask_bits);
    143  return cpu;
    294  #define for_each_cpu(cpu, mask) \
    295  for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
    328  #define for_each_cpu_wrap(cpu, mask, start) \
    329  for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
    345  #define for_each_cpu_and(cpu, mask1, mask2) \
    [all …]

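As the macro bodies show, for_each_cpu() is just a for_each_set_bit() walk over the mask's underlying bit array, with cpumask_check() warning on out-of-range CPU numbers. A fragment showing the usual iteration idiom (kernel context assumed, sketch only):

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Sketch: walk every online CPU via the generic mask iterator. */
    static void print_online_cpus(void)
    {
        unsigned int cpu;

        for_each_cpu(cpu, cpu_online_mask)
            pr_info("cpu%u is online\n", cpu);
    }
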
/linux/arch/arm64/kernel/

  smp.c
    90  static void ipi_setup(int cpu);
    93  static void ipi_teardown(int cpu);
    94  static int op_cpu_kill(unsigned int cpu);
    96  static inline int op_cpu_kill(unsigned int cpu)
    107  static int boot_secondary(unsigned int cpu, struct task_struct *idle)
    109  const struct cpu_operations *ops = get_cpu_ops(cpu);
    112  return ops->cpu_boot(cpu);
    119  int __cpu_up(unsigned int cpu, struct task_struct *idle)
    132  ret = boot_secondary(cpu, idle);
    135  pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
    [all …]

/linux/Documentation/translations/zh_CN/scheduler/

  sched-bwc.rst (translated from the Chinese)
    24  ... up to "quota" microseconds of CPU time. While threads in the cgroup are runnable, that quota is handed out to each cpu in time slices
    29  it is transferred on demand to cpu-local "silos"; the amount transferred on each update is tunable, described as a "slice" (...
    65  Quota, period and burst are managed within the cpu subsystem via cgroupfs.
    69  :ref:`Documentation/admin-guide/cgroup-v2.rst <cgroup-v2-cpu>`.
    71  - cpu.cfs_quota_us: run-time replenished within a period (in microseconds).
    72  - cpu.cfs_period_us: the length of a period (in microseconds).
    73  - cpu.stat: exports throttling statistics [explained further below].
    74  - cpu.cfs_burst_us: the maximum accumulated run-time (in microseconds).
    78  cpu.cfs_period_us=100ms
    79  cpu.cfs_quota_us=-1
    [all …]

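With the cgroup-v1 files listed above, capping a group at half a CPU means quota = period / 2, e.g. cpu.cfs_quota_us=50000 against cpu.cfs_period_us=100000 (and -1 means unlimited). A C sketch that applies such a cap; the mount point and the group name "demo" are assumptions:

    #include <stdio.h>

    /* Assumes a v1 hierarchy mounted at /sys/fs/cgroup/cpu with an
     * existing group "demo"; values are in microseconds. */
    static int write_val(const char *file, long val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/cgroup/cpu/demo/%s", file);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%ld\n", val);
        return fclose(f);
    }

    int main(void)
    {
        /* 50 ms of runtime every 100 ms: at most 0.5 CPU on average. */
        if (write_val("cpu.cfs_period_us", 100000) ||
            write_val("cpu.cfs_quota_us", 50000))
            perror("setting CFS bandwidth");
        return 0;
    }
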
/linux/arch/arm/mach-meson/

  platsmp.c
    38  static struct reset_control *meson_smp_get_core_reset(int cpu)
    40  struct device_node *np = of_get_cpu_node(cpu, 0);
    45  static void meson_smp_set_cpu_ctrl(int cpu, bool on_off)
    50  val |= BIT(cpu);
    52  val &= ~BIT(cpu);
    116  static void meson_smp_begin_secondary_boot(unsigned int cpu)
    125  sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
    131  scu_cpu_power_enable(scu_base, cpu);
    134  static int meson_smp_finalize_secondary_boot(unsigned int cpu)
    139  while (readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu))) {
    [all …]

/linux/arch/powerpc/include/asm/

  smp.h
    35  extern int cpu_to_chip_id(int cpu);
    45  void (*message_pass)(int cpu, int msg);
    47  void (*cause_ipi)(int cpu);
    49  int (*cause_nmi_ipi)(int cpu);
    68  extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
    69  extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
    79  void generic_cpu_die(unsigned int cpu);
    80  void generic_set_cpu_dead(unsigned int cpu);
    81  void generic_set_cpu_up(unsigned int cpu);
    82  int generic_check_cpu_restart(unsigned int cpu);
    [all …]

/linux/arch/arm/mach-bcm/

  platsmp-brcmstb.c
    59  static int per_cpu_sw_state_rd(u32 cpu)
    61  sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
    62  return per_cpu(per_cpu_sw_state, cpu);
    65  static void per_cpu_sw_state_wr(u32 cpu, int val)
    68  per_cpu(per_cpu_sw_state, cpu) = val;
    69  sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
    72  static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
    75  static void __iomem *pwr_ctrl_get_base(u32 cpu)
    78  base += (cpu_logical_map(cpu) * 4);
    82  static u32 pwr_ctrl_rd(u32 cpu)
    [all …]

/linux/arch/arm/mach-tegra/

  platsmp.c
    36  static void tegra_secondary_init(unsigned int cpu)
    38  cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
    42  static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
    44  cpu = cpu_logical_map(cpu);
    54  tegra_put_cpu_in_reset(cpu);
    62  flowctrl_write_cpu_halt(cpu, 0);
    64  tegra_enable_cpu_clock(cpu);
    65  flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
    66  tegra_cpu_out_of_reset(cpu);
    70  static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
    [all …]

/linux/arch/loongarch/kernel/

  smp.c
    80  unsigned int cpu, i;
    84  for_each_online_cpu(cpu)
    85  seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
    90  static inline void set_cpu_core_map(int cpu)
    94  cpumask_set_cpu(cpu, &cpu_core_setup_map);
    97  if (cpu_data[cpu].package == cpu_data[i].package) {
    98  cpumask_set_cpu(i, &cpu_core_map[cpu]);
    99  cpumask_set_cpu(cpu, &cpu_core_map[i]);
    104  static inline void set_cpu_sibling_map(int cpu)
    108  cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
    [all …]

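set_cpu_core_map() marks two CPUs as core siblings whenever their package IDs match, always setting the mask in both directions so the relation stays symmetric. A standalone sketch of the same pairing loop over a toy topology table:

    #include <stdio.h>

    #define NCPUS 4

    /* Toy stand-in for cpu_data[].package: cpus 0,1 share package 0;
     * cpus 2,3 share package 1. */
    static const int package[NCPUS] = { 0, 0, 1, 1 };
    static unsigned int core_map[NCPUS];   /* bit i set => sibling of cpu i */

    static void set_cpu_core_map(int cpu)
    {
        for (int i = 0; i < NCPUS; i++) {
            if (package[cpu] == package[i]) {
                /* set symmetrically, as the kernel does */
                core_map[cpu] |= 1U << i;
                core_map[i]   |= 1U << cpu;
            }
        }
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++)
            set_cpu_core_map(cpu);
        for (int cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu%d core_map: 0x%x\n", cpu, core_map[cpu]);
        return 0;
    }
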
/linux/arch/x86/xen/

  smp.c
    32  void xen_smp_intr_free(unsigned int cpu)
    34  kfree(per_cpu(xen_resched_irq, cpu).name);
    35  per_cpu(xen_resched_irq, cpu).name = NULL;
    36  if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
    37  unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
    38  per_cpu(xen_resched_irq, cpu).irq = -1;
    40  kfree(per_cpu(xen_callfunc_irq, cpu).name);
    41  per_cpu(xen_callfunc_irq, cpu).name = NULL;
    42  if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
    43  unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
    [all …]

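xen_smp_intr_free() shows a teardown discipline worth noting: free and NULL the name unconditionally, but only unbind when the stored irq is valid, resetting it to -1 so a repeated call is harmless. The same shape in plain C (the resource type here is made up):

    #include <stdlib.h>
    #include <unistd.h>

    /* Hypothetical per-CPU resource mirroring the shape of xen_resched_irq. */
    struct res { char *name; int fd; };

    static void res_free(struct res *r)
    {
        free(r->name);        /* free(NULL) is a no-op, so always safe */
        r->name = NULL;
        if (r->fd >= 0) {     /* only release a live handle */
            close(r->fd);
            r->fd = -1;       /* a second res_free() is now harmless */
        }
    }

    int main(void)
    {
        struct res r = { .name = NULL, .fd = -1 };

        res_free(&r);   /* safe even though nothing was acquired */
        res_free(&r);   /* and safe to repeat */
        return 0;
    }
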
  smp_pv.c
    59  int cpu;
    72  cpu = smp_processor_id();
    73  smp_store_cpu_info(cpu);
    74  set_cpu_sibling_map(cpu);
    80  notify_cpu_starting(cpu);
    82  set_cpu_online(cpu, true);
    96  void xen_smp_intr_free_pv(unsigned int cpu)
    98  kfree(per_cpu(xen_irq_work, cpu).name);
    99  per_cpu(xen_irq_work, cpu).name = NULL;
    100  if (per_cpu(xen_irq_work, cpu).irq >= 0) {
    [all …]

/linux/kernel/

  smpboot.c
    30  struct task_struct *idle_thread_get(unsigned int cpu)
    32  struct task_struct *tsk = per_cpu(idle_threads, cpu);
    50  static __always_inline void idle_init(unsigned int cpu)
    52  struct task_struct *tsk = per_cpu(idle_threads, cpu);
    55  tsk = fork_idle(cpu);
    57  pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
    59  per_cpu(idle_threads, cpu) = tsk;
    68  unsigned int cpu, boot_cpu;
    72  for_each_possible_cpu(cpu) {
    73  if (cpu != boot_cpu)
    [all …]

/linux/arch/s390/kernel/

  smp.c
    173  int cpu;
    175  for_each_cpu(cpu, mask)
    176  if (per_cpu(pcpu_devices, cpu).address == address)
    177  return &per_cpu(pcpu_devices, cpu);
    192  static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
    208  lc->cpu_nr = cpu;
    209  lc->spinlock_lockval = arch_spin_lockval(cpu);
    216  if (abs_lowcore_map(cpu, lc, true))
    218  lowcore_ptr[cpu] = lc;
    232  static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
    [all …]

/linux/tools/perf/util/

  cpumap.c
    75  * Special treatment for -1, which is not real cpu number,
    79  if (data->cpus_data.cpu[i] == (u16) -1)
    80  RC_CHK_ACCESS(map)->map[i].cpu = -1;
    82  RC_CHK_ACCESS(map)->map[i].cpu = (int) data->cpus_data.cpu[i];
    106  int cpu;
    109  for_each_set_bit(cpu, local_copy, 64)
    110  RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
    127  RC_CHK_ACCESS(map)->map[i++].cpu
    129  for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
    190  cpu__get_topology_int(int cpu, const char *name, int *value)
    200  cpu__get_socket_id(struct perf_cpu cpu)
    206  aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
    242  struct perf_cpu cpu;
    284  cpu__get_die_id(struct perf_cpu cpu)
    291  aggr_cpu_id__die(struct perf_cpu cpu, void *data)
    314  cpu__get_cluster_id(struct perf_cpu cpu)
    321  aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
    338  cpu__get_core_id(struct perf_cpu cpu)
    344  aggr_cpu_id__core(struct perf_cpu cpu, void *data)
    363  aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
    377  aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
    385  aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
    520  cpu__get_node(struct perf_cpu cpu)
    553  unsigned int cpu, mem;
    611  struct perf_cpu cpu = { .cpu = INT_MAX };
    658  int i, cpu;
    [all …]

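cpu_map__from_mask() decodes each 64-bit word of a kernel-supplied bitmask into CPU numbers with for_each_set_bit(), offsetting by 64 per word (the cpu + cpus_per_i line above). A standalone sketch of that decode:

    #include <stdio.h>

    /* Decode one 64-bit mask word into cpu numbers; `base` is
     * 64 * word_index, mirroring cpu + cpus_per_i in the source. */
    static int decode_mask_word(unsigned long long word, int base,
                                int *out, int n)
    {
        int j = 0;

        while (word && j < n) {
            int bit = __builtin_ctzll(word);
            word &= word - 1;       /* clear lowest set bit */
            out[j++] = base + bit;
        }
        return j;
    }

    int main(void)
    {
        int cpus[64];
        int n = decode_mask_word(0x15ULL, 0, cpus, 64); /* bits 0, 2, 4 */

        for (int i = 0; i < n; i++)
            printf("cpu%d\n", cpus[i]);
        return 0;
    }
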
/linux/arch/arm64/boot/dts/amd/

  elba-16core.dtsi
    11  cpu-map {
    13  core0 { cpu = <&cpu0>; };
    14  core1 { cpu = <&cpu1>; };
    15  core2 { cpu = <&cpu2>; };
    16  core3 { cpu = <&cpu3>; };
    20  core0 { cpu = <&cpu4>; };
    21  core1 { cpu = <&cpu5>; };
    22  core2 { cpu = <&cpu6>; };
    23  core3 { cpu = <&cpu7>; };
    27  core0 { cpu = <&cpu8>; };
    [all …]

/linux/Documentation/devicetree/bindings/cpu/

  cpu-topology.txt
    20  For instance in a system where CPUs support SMT, "cpu" nodes represent all
    22  In systems where SMT is not supported "cpu" nodes represent all cores present
    25  CPU topology bindings allow one to associate cpu nodes with hierarchical groups
    29  Currently, only ARM/RISC-V intend to use this cpu topology binding but it may be
    32  The cpu nodes, as per bindings defined in [4], represent the devices that
    35  A topology description containing phandles to cpu nodes that are not compliant
    39  2 - cpu-map node
    42  The ARM/RISC-V CPU topology is defined within the cpu-map node, which is a direct
    46  - cpu-map node
    51  cpu-map node.
    [all …]

/linux/tools/power/cpupower/utils/

  cpufreq-info.c
    58  unsigned int cpu, nr_cpus;
    67  for (cpu = 0; cpu < nr_cpus; cpu++) {
    68  policy = cpufreq_get_policy(cpu);
    72  if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
    79  cpu , policy->min, max ? min_pctg : 0, policy->max,
    126  static int get_boost_mode_x86(unsigned int cpu)
    132  ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
    135  " on CPU %d -- are you root?\n"), cpu);
    155  ret = decode_pstates(cpu, b_states, pstates, &pstate_no);
    181  intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu);
    [all …]

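cpufreq-info walks CPUs with the libcpupower helpers shown above (cpufreq_get_policy(), cpufreq_get_hardware_limits()). Without the library, much of the same data is readable from sysfs; a small sketch that reads a CPU's current frequency (the sysfs node is standard, though its presence depends on the active cpufreq driver):

    #include <stdio.h>

    /* Read scaling_cur_freq (kHz) for a CPU from sysfs; returns -1 on error. */
    static long cur_freq_khz(int cpu)
    {
        char path[96];
        long khz;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq", cpu);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%ld", &khz) != 1)
            khz = -1;
        fclose(f);
        return khz;
    }

    int main(void)
    {
        printf("cpu0: %ld kHz\n", cur_freq_khz(0));
        return 0;
    }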