/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

/*
 * NUMA_NO_NODE was moved from here to <linux/numa.h> to preserve its
 * visibility; it may be used independent of CONFIG_NUMA.
 */
#include <linux/numa.h>
#include <linux/cpumask.h>

#ifdef CONFIG_NUMA

#include <asm/mpspec.h>
#include <asm/percpu.h>

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
/*
 * Override the generic percpu implementation of cpu_to_node.
 */
extern int __cpu_to_node(int cpu);
#define cpu_to_node __cpu_to_node

extern int early_cpu_to_node(int cpu);

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Same function, but used if called before the per_cpu areas are set up */
static inline int early_cpu_to_node(int cpu)
{
	return early_per_cpu(x86_cpu_to_node_map, cpu);
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return node_to_cpumask_map[node];
}
#endif

extern void setup_node_to_cpumask_map(void);

#define pcibus_to_node(bus) __pcibus_to_node(bus)

extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)

#else /* !CONFIG_NUMA */

static inline int numa_node_id(void)
{
	return 0;
}
/*
 * Indicate override:
 */
#define numa_node_id numa_node_id

static inline int early_cpu_to_node(int cpu)
{
	return 0;
}

static inline void setup_node_to_cpumask_map(void) { }

#endif

#include <asm-generic/topology.h>

/* Topology information */
enum x86_topology_domains {
	TOPO_SMT_DOMAIN,
	TOPO_CORE_DOMAIN,
	TOPO_MODULE_DOMAIN,
	TOPO_TILE_DOMAIN,
	TOPO_DIE_DOMAIN,
	TOPO_DIEGRP_DOMAIN,
	TOPO_PKG_DOMAIN,
	TOPO_MAX_DOMAIN,
};

struct x86_topology_system {
	unsigned int	dom_shifts[TOPO_MAX_DOMAIN];
	unsigned int	dom_size[TOPO_MAX_DOMAIN];
};

extern struct x86_topology_system x86_topo_system;

static inline unsigned int topology_get_domain_size(enum x86_topology_domains dom)
{
	return x86_topo_system.dom_size[dom];
}

/* Bit offset at which domain @dom starts in the APIC ID; SMT starts at bit 0 */
static inline unsigned int topology_get_domain_shift(enum x86_topology_domains dom)
{
	return dom == TOPO_SMT_DOMAIN ? 0 : x86_topo_system.dom_shifts[dom - 1];
}

extern const struct cpumask *cpu_coregroup_mask(int cpu);
extern const struct cpumask *cpu_clustergroup_mask(int cpu);

#define topology_logical_package_id(cpu)	(cpu_data(cpu).topo.logical_pkg_id)
#define topology_physical_package_id(cpu)	(cpu_data(cpu).topo.pkg_id)
#define topology_logical_die_id(cpu)		(cpu_data(cpu).topo.logical_die_id)
#define topology_die_id(cpu)			(cpu_data(cpu).topo.die_id)
#define topology_core_id(cpu)			(cpu_data(cpu).topo.core_id)
#define topology_ppin(cpu)			(cpu_data(cpu).ppin)

#define topology_amd_node_id(cpu)		(cpu_data(cpu).topo.amd_node_id)

extern unsigned int __max_die_per_package;

#ifdef CONFIG_SMP
#define topology_cluster_id(cpu)		(cpu_data(cpu).topo.l2c_id)
#define topology_die_cpumask(cpu)		(per_cpu(cpu_die_map, cpu))
#define topology_cluster_cpumask(cpu)		(cpu_clustergroup_mask(cpu))
#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))

extern unsigned int __max_logical_packages;
#define topology_max_packages()			(__max_logical_packages)

static inline int topology_max_die_per_package(void)
{
	return __max_die_per_package;
}

extern int __max_smt_threads;

static inline int topology_max_smt_threads(void)
{
	return __max_smt_threads;
}

#include <linux/cpu_smt.h>

int topology_update_package_map(unsigned int apicid, unsigned int cpu);
int topology_update_die_map(unsigned int dieid, unsigned int cpu);
int topology_phys_to_logical_pkg(unsigned int pkg);

extern unsigned int __amd_nodes_per_pkg;

static inline unsigned int topology_amd_nodes_per_pkg(void)
{
	return __amd_nodes_per_pkg;
}

extern struct cpumask __cpu_primary_thread_mask;
#define cpu_primary_thread_mask ((const struct cpumask *)&__cpu_primary_thread_mask)

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
static inline bool topology_is_primary_thread(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_primary_thread_mask);
}
#else /* CONFIG_SMP */
#define topology_max_packages()			(1)
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int
topology_update_die_map(unsigned int dieid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_die_per_package(void) { return 1; }
static inline int topology_max_smt_threads(void) { return 1; }
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
static inline unsigned int topology_amd_nodes_per_pkg(void) { return 0; }
#endif /* !CONFIG_SMP */

static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);

extern bool x86_topology_update;

#ifdef CONFIG_SCHED_MC_PRIO
#include <asm/percpu.h>

DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
extern unsigned int __read_mostly sysctl_sched_itmt_enabled;

/* Interface to set priority of a cpu */
void sched_set_itmt_core_prio(int prio, int core_cpu);

/* Interface to notify scheduler that system supports ITMT */
int sched_set_itmt_support(void);

/* Interface to notify scheduler that system revokes ITMT support */
void sched_clear_itmt_support(void);

#else /* CONFIG_SCHED_MC_PRIO */

#define sysctl_sched_itmt_enabled	0
static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
{
}
static inline int sched_set_itmt_support(void)
{
	return 0;
}
static inline void sched_clear_itmt_support(void)
{
}
#endif /* CONFIG_SCHED_MC_PRIO */

#if defined(CONFIG_SMP) && defined(CONFIG_X86_64)
#include <asm/cpufeature.h>

DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key);

#define arch_scale_freq_invariant() static_branch_likely(&arch_scale_freq_key)

DECLARE_PER_CPU(unsigned long, arch_freq_scale);

static inline long arch_scale_freq_capacity(int cpu)
{
	return per_cpu(arch_freq_scale, cpu);
}
#define arch_scale_freq_capacity arch_scale_freq_capacity

extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
#else
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
#endif

extern void arch_scale_freq_tick(void);
#define arch_scale_freq_tick arch_scale_freq_tick

#ifdef CONFIG_ACPI_CPPC_LIB
void init_freq_invariance_cppc(void);
#define arch_init_invariance_cppc init_freq_invariance_cppc
#endif

#endif /* _ASM_X86_TOPOLOGY_H */
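
/*
 * Illustrative sketch, not part of this header: one way a kernel-side
 * consumer might combine cpumask_of_node() with the topology_*() accessors
 * declared above. The function name dump_node_topology() and the pr_info()
 * formatting are assumptions for illustration only; the example is kept
 * inside this comment so the header itself is unaffected.
 *
 *	static void dump_node_topology(int node)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_cpu(cpu, cpumask_of_node(node))
 *			pr_info("node %d: cpu %u pkg %u core %u\n",
 *				node, cpu,
 *				topology_physical_package_id(cpu),
 *				topology_core_id(cpu));
 *	}
 */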