/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

/*
 * The NUMA_NO_NODE definition was moved from here to <linux/numa.h> to
 * preserve its visibility; it may be used independent of CONFIG_NUMA.
 */
#include <linux/numa.h>
#include <linux/cpumask.h>

#ifdef CONFIG_NUMA

#include <asm/mpspec.h>
#include <asm/percpu.h>

/* Mappings between logical CPU number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
/*
 * Override the generic percpu implementation of cpu_to_node
 */
extern int __cpu_to_node(int cpu);
#define cpu_to_node __cpu_to_node

extern int early_cpu_to_node(int cpu);

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Same function, but used if called before the per_cpu areas are set up */
static inline int early_cpu_to_node(int cpu)
{
	return early_per_cpu(x86_cpu_to_node_map, cpu);
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Mappings between node number and the CPUs on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return node_to_cpumask_map[node];
}
#endif
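
/*
 * Illustrative sketch, not part of this header: with CONFIG_NUMA enabled, a
 * caller could walk the CPUs of a valid node using for_each_cpu() from
 * <linux/cpumask.h>, e.g.:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(node))
 *		pr_info("CPU %d is on node %d\n", cpu, node);
 */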

extern void setup_node_to_cpumask_map(void);

#define pcibus_to_node(bus) __pcibus_to_node(bus)

extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)

#else /* !CONFIG_NUMA */

static inline int numa_node_id(void)
{
	return 0;
}
/*
 * Indicate that numa_node_id() is overridden:
 */
#define numa_node_id numa_node_id

static inline int early_cpu_to_node(int cpu)
{
	return 0;
}

static inline void setup_node_to_cpumask_map(void) { }

#endif

#include <asm-generic/topology.h>

extern const struct cpumask *cpu_coregroup_mask(int cpu);
extern const struct cpumask *cpu_clustergroup_mask(int cpu);

#define topology_logical_package_id(cpu)	(cpu_data(cpu).logical_proc_id)
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_logical_die_id(cpu)		(cpu_data(cpu).logical_die_id)
#define topology_die_id(cpu)			(cpu_data(cpu).cpu_die_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_ppin(cpu)			(cpu_data(cpu).ppin)

extern unsigned int __max_die_per_package;

#ifdef CONFIG_SMP
#define topology_cluster_id(cpu)		(per_cpu(cpu_l2c_id, cpu))
#define topology_die_cpumask(cpu)		(per_cpu(cpu_die_map, cpu))
#define topology_cluster_cpumask(cpu)		(cpu_clustergroup_mask(cpu))
#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))

extern unsigned int __max_logical_packages;
#define topology_max_packages()			(__max_logical_packages)

static inline int topology_max_die_per_package(void)
{
	return __max_die_per_package;
}

extern int __max_smt_threads;

static inline int topology_max_smt_threads(void)
{
	return __max_smt_threads;
}

#include <linux/cpu_smt.h>

int topology_update_package_map(unsigned int apicid, unsigned int cpu);
int topology_update_die_map(unsigned int dieid, unsigned int cpu);
int topology_phys_to_logical_pkg(unsigned int pkg);

extern struct cpumask __cpu_primary_thread_mask;
#define cpu_primary_thread_mask ((const struct cpumask *)&__cpu_primary_thread_mask)

/**
 * topology_is_primary_thread - Check whether the CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
static inline bool topology_is_primary_thread(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_primary_thread_mask);
}
#else /* CONFIG_SMP */
#define topology_max_packages()			(1)
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int
topology_update_die_map(unsigned int dieid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_die_per_package(void) { return 1; }
static inline int topology_max_smt_threads(void) { return 1; }
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
#endif /* !CONFIG_SMP */
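
/*
 * Illustrative sketch, not part of this header: code that wants to touch only
 * one SMT thread per core could filter with the helpers above; here
 * do_per_core_work() is a hypothetical callback named only for the example:
 *
 *	unsigned int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		if (topology_is_primary_thread(cpu))
 *			do_per_core_work(cpu);
 */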

static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);

extern bool x86_topology_update;

#ifdef CONFIG_SCHED_MC_PRIO
#include <asm/percpu.h>

DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
extern unsigned int __read_mostly sysctl_sched_itmt_enabled;

/* Interface to set the priority of a CPU */
void sched_set_itmt_core_prio(int prio, int core_cpu);

/* Interface to notify the scheduler that the system supports ITMT */
int sched_set_itmt_support(void);

/* Interface to notify the scheduler that the system revokes ITMT support */
void sched_clear_itmt_support(void);

#else /* CONFIG_SCHED_MC_PRIO */

#define sysctl_sched_itmt_enabled	0
static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
{
}
static inline int sched_set_itmt_support(void)
{
	return 0;
}
static inline void sched_clear_itmt_support(void)
{
}
#endif /* CONFIG_SCHED_MC_PRIO */

#if defined(CONFIG_SMP) && defined(CONFIG_X86_64)
#include <asm/cpufeature.h>

DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key);

#define arch_scale_freq_invariant() static_branch_likely(&arch_scale_freq_key)

DECLARE_PER_CPU(unsigned long, arch_freq_scale);

static inline long arch_scale_freq_capacity(int cpu)
{
	return per_cpu(arch_freq_scale, cpu);
}
#define arch_scale_freq_capacity arch_scale_freq_capacity

extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
#else
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
#endif

extern void arch_scale_freq_tick(void);
#define arch_scale_freq_tick arch_scale_freq_tick

#ifdef CONFIG_ACPI_CPPC_LIB
void init_freq_invariance_cppc(void);
#define arch_init_invariance_cppc init_freq_invariance_cppc
#endif

#endif /* _ASM_X86_TOPOLOGY_H */