/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

/*
 * ENABLE_TOPO_DEFINES gates the topology_*() accessors and the
 * arch_provides_topology_pointers marker further down in this file.
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
#  define ENABLE_TOPO_DEFINES
# endif
#else
# ifdef CONFIG_SMP
#  define ENABLE_TOPO_DEFINES
# endif
#endif

/*
 * NUMA_NO_NODE is defined in <linux/numa.h> (it was moved there from
 * here) so that it stays visible even when CONFIG_NUMA is not set.
 */
#include <linux/numa.h>

#ifdef CONFIG_NUMA
#include <linux/cpumask.h>

#include <asm/mpspec.h>

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
/*
 * override generic percpu implementation of cpu_to_node
 */
extern int __cpu_to_node(int cpu);
#define cpu_to_node __cpu_to_node

extern int early_cpu_to_node(int cpu);

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
        return early_per_cpu(x86_cpu_to_node_map, cpu);
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
        return node_to_cpumask_map[node];
}
#endif

extern void setup_node_to_cpumask_map(void);

/*
 * Returns the number of the node containing Node 'node'.  This
 * architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)

#ifdef CONFIG_X86_32
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_remap_size[];
#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])

# define SD_CACHE_NICE_TRIES	1
# define SD_IDLE_IDX		1

#else

# define SD_CACHE_NICE_TRIES	2
# define SD_IDLE_IDX		2

#endif

/* sched_domains SD_NODE_INIT for NUMA machines */
#define SD_NODE_INIT (struct sched_domain) {				\
	.min_interval		= 8,					\
	.max_interval		= 32,					\
	.busy_factor		= 32,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= SD_CACHE_NICE_TRIES,			\
	.busy_idx		= 3,					\
	.idle_idx		= SD_IDLE_IDX,				\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}

#ifdef CONFIG_X86_64
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
#endif

#else /* !CONFIG_NUMA */

static inline int numa_node_id(void)
{
        return 0;
}
/*
 * indicate override:
 */
#define numa_node_id numa_node_id

static inline int early_cpu_to_node(int cpu)
{
        return 0;
}

static inline void setup_node_to_cpumask_map(void) { }

#endif

#include <asm-generic/topology.h>

extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))

/* indicates that pointers to the topology cpumask_t maps are valid */
#define arch_provides_topology_pointers		yes
#endif

static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
void x86_pci_root_bus_res_quirks(struct pci_bus *b);

#ifdef CONFIG_SMP
#define mc_capable()	((boot_cpu_data.x86_max_cores > 1) &&		\
			 (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
#define smt_capable()	(smp_num_siblings > 1)
#endif

#ifdef CONFIG_NUMA
extern int get_mp_bus_to_node(int busnum);
extern void set_mp_bus_to_node(int busnum, int node);
#else
static inline int get_mp_bus_to_node(int busnum)
{
        return 0;
}
static inline void set_mp_bus_to_node(int busnum, int node)
{
}
#endif

#endif /* _ASM_X86_TOPOLOGY_H */
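/*
 * Illustrative sketch only, guarded out of the build with #if 0: how a
 * hypothetical kernel-module consumer might walk the NUMA/topology
 * accessors declared above.  The function name topo_example_dump() is
 * invented for this example; the accessors it calls (for_each_online_node(),
 * cpumask_of_node(), cpu_to_node(), topology_core_id(),
 * topology_physical_package_id()) come from this header and its generic
 * counterparts.  Note that topology_core_id() and
 * topology_physical_package_id() are only defined when
 * ENABLE_TOPO_DEFINES is set (HT on 32-bit, SMP on 64-bit).
 */
#if 0
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static void topo_example_dump(void)
{
        int cpu, node;

        for_each_online_node(node) {
                const struct cpumask *mask = cpumask_of_node(node);

                pr_info("node %d: %u CPUs\n", node, cpumask_weight(mask));

                /* Walk every CPU mapped to this node. */
                for_each_cpu(cpu, mask)
                        pr_info("  cpu %d: node %d core %d package %d\n",
                                cpu, cpu_to_node(cpu),
                                topology_core_id(cpu),
                                topology_physical_package_id(cpu));
        }
}
#endif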