/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/*
	 * This would be the place to create cpu topology based on MPIDR.
	 *
	 * However, it cannot be trusted to depict the actual topology; some
	 * pieces of the architecture enforce an artificial cap on Aff0 values
	 * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
	 * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
	 * having absolutely no relationship to the actual underlying system
	 * topology, and cannot be reasonably used as core / package ID.
	 *
	 * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
	 * we still wouldn't be able to obtain a sane core ID. This means we
	 * need to entirely ignore MPIDR for any topology deduction.
	 */
	cpuid_topo->thread_id  = -1;
	cpuid_topo->core_id    = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
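 *
 * As background (a summary of how the PPTT helpers below are used, not
 * behaviour defined in this file): find_acpi_cpu_topology(cpu, 0) returns a
 * unique ID for the lowest topology level (the thread on an SMT system,
 * otherwise the core), find_acpi_cpu_topology(cpu, 1) the ID of the level
 * above it, and find_acpi_cpu_topology_package(cpu) the physical package ID.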
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id   = topology_id;
		} else {
			cpu_topology[cpu].thread_id  = -1;
			cpu_topology[cpu].core_id    = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt()	read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt()	read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
#else
#define read_corecnt()	(0UL)
#define read_constcnt()	(0UL)
#endif

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

void update_freq_counters_refs(void)
{
	this_cpu_write(arch_core_cycles_prev, read_corecnt());
	this_cpu_write(arch_const_cycles_prev, read_constcnt());
}

static inline bool freq_counters_valid(int cpu)
{
	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
		return false;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return false;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return false;
	}

	return true;
}

static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
{
	u64 ratio;

	if (unlikely(!max_rate || !ref_rate)) {
		pr_debug("CPU%d: invalid maximum or reference frequency.\n",
			 cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * reference counter and the maximum frequency of the CPU.
	 *
	 *                          ref_rate
	 * arch_max_freq_scale =   ---------- * SCHED_CAPACITY_SCALE²
	 *                          max_rate
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low reference frequencies (down to the KHz range which should
	 * be unlikely).
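	 *
	 * Illustrative example (assumed rates, not values from this file):
	 * with a 100 MHz constant reference counter and a 2 GHz maximum CPU
	 * frequency, ratio = (100000000 << 20) / 2000000000 = 52428, i.e.
	 * roughly SCHED_CAPACITY_SCALE² / 20.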
	 */
	ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_rate);
	if (!ratio) {
		WARN_ONCE(1, "Reference frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}

static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)

static void amu_fie_setup(const struct cpumask *cpus)
{
	bool invariant;
	int cpu;

	/* We are already set since the last insmod of cpufreq driver */
	if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
		return;

	for_each_cpu(cpu, cpus) {
		if (!freq_counters_valid(cpu) ||
		    freq_inv_set_max_ratio(cpu,
					   cpufreq_get_hw_max_freq(cpu) * 1000,
					   arch_timer_get_rate()))
			return;
	}

	cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);

	invariant = topology_scale_freq_invariant();

	/* We aren't fully invariant yet */
	if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask))
		return;

	static_branch_enable(&amu_fie_key);

	pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
		 cpumask_pr_args(cpus));

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (!invariant)
		rebuild_sched_domains_energy();
}

static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		amu_fie_setup(policy->related_cpus);

	/*
	 * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU
	 * counters don't have any dependency on cpufreq driver once we have
	 * initialized AMU support and enabled invariance. The AMU counters will
	 * keep on working just fine in the absence of the cpufreq driver, and
	 * for the CPUs for which there are no counters available, the last set
	 * value of freq_scale will remain valid as that is the frequency those
	 * CPUs are running at.
	 */

	return 0;
}

static struct notifier_block init_amu_fie_notifier = {
	.notifier_call = init_amu_fie_callback,
};

static int __init init_amu_fie(void)
{
	int ret;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
		return -ENOMEM;

	ret = cpufreq_register_notifier(&init_amu_fie_notifier,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		free_cpumask_var(amu_fie_cpus);

	return ret;
}
core_initcall(init_amu_fie);

bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return amu_freq_invariant() &&
	       cpumask_subset(cpus, amu_fie_cpus);
}

void topology_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;
	int cpu = smp_processor_id();

	if (!amu_freq_invariant())
		return;

	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
		return;

	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	update_freq_counters_refs();

	const_cnt = this_cpu_read(arch_const_cycles_prev);
	core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		return;

	/*
	 *            /\core    arch_max_freq_scale
	 * scale =  --------- * ---------------------
	 *            /\const   SCHED_CAPACITY_SCALE
	 *
	 * See freq_inv_set_max_ratio() for details on arch_max_freq_scale
	 * and the use of SCHED_CAPACITY_SHIFT.
	 */
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(freq_scale, (unsigned long)scale);
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static void cpu_read_corecnt(void *val)
{
	*(u64 *)val = read_corecnt();
}

static void cpu_read_constcnt(void *val)
{
	*(u64 *)val = read_constcnt();
}

static inline
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
	/*
	 * Abort call on counterless CPU or when interrupts are
	 * disabled - can lead to deadlock in smp sync call.
	 */
	if (!cpu_has_amu_feat(cpu))
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(irqs_disabled()))
		return -EPERM;

	smp_call_function_single(cpu, func, val, 1);

	return 0;
}

/*
 * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
 * below.
 */
bool cpc_ffh_supported(void)
{
	return freq_counters_valid(get_cpu_with_amu_feat());
}

int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
	int ret = -EOPNOTSUPP;

	switch ((u64)reg->address) {
	case 0x0:
		ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
		break;
	case 0x1:
		ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
		break;
	}

	if (!ret) {
		*val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				    reg->bit_offset);
		*val >>= reg->bit_offset;
	}

	return ret;
}

int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_ACPI_CPPC_LIB */