#include <linux/init.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/range.h>

#include <asm/amd_nb.h>
#include <asm/pci_x86.h>

#include <asm/pci-direct.h>

#include "bus_numa.h"

/*
 * This discovers the pcibus <-> node mapping on AMD K8.
 * It also gets the peer root bus resources for I/O and MMIO.
 */

struct pci_hostbridge_probe {
	u32 bus;
	u32 slot;
	u32 vendor;
	u32 device;
};

static struct pci_hostbridge_probe pci_probes[] __initdata = {
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1100 },
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
};

#define RANGE_NUM 16

static struct pci_root_info __init *find_pci_root_info(int node, int link)
{
	struct pci_root_info *info;

	/* find the position */
	list_for_each_entry(info, &pci_root_infos, list)
		if (info->node == node && info->link == link)
			return info;

	return NULL;
}

static void __init set_mp_bus_range_to_node(int min_bus, int max_bus, int node)
{
#ifdef CONFIG_NUMA
	int j;

	for (j = min_bus; j <= max_bus; j++)
		set_mp_bus_to_node(j, node);
#endif
}

/**
 * early_fill_mp_bus_info() - called before pcibios_scan_root and pci_scan_bus
 *
 * Fills the mp_bus_to_node mapping based on the LDT Bus Number Registers
 * found in the K8 northbridge, and records the peer root bus resources
 * (I/O and MMIO) per node/link.
 */
static int __init early_fill_mp_bus_info(void)
{
	int i;
	unsigned bus;
	unsigned slot;
	int node;
	int link;
	int def_node;
	int def_link;
	struct pci_root_info *info;
	u32 reg;
	u64 start;
	u64 end;
	struct range range[RANGE_NUM];
	u64 val;
	u32 address;
	bool found;
	struct resource fam10h_mmconf_res, *fam10h_mmconf;
	u64 fam10h_mmconf_start;
	u64 fam10h_mmconf_end;

	if (!early_pci_allowed())
		return -1;

	found = false;
	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
		u32 id;
		u16 device;
		u16 vendor;

		bus = pci_probes[i].bus;
		slot = pci_probes[i].slot;
		id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);

		vendor = id & 0xffff;
		device = (id >> 16) & 0xffff;
		if (pci_probes[i].vendor == vendor &&
		    pci_probes[i].device == device) {
			found = true;
			break;
		}
	}

	if (!found)
		return 0;

	for (i = 0; i < 4; i++) {
		int min_bus;
		int max_bus;

		reg = read_pci_config(bus, slot, 1, 0xe0 + (i << 2));

		/* Check if that register is enabled for bus range */
		if ((reg & 7) != 3)
			continue;

		min_bus = (reg >> 16) & 0xff;
		max_bus = (reg >> 24) & 0xff;
		node = (reg >> 4) & 0x07;
		set_mp_bus_range_to_node(min_bus, max_bus, node);
		link = (reg >> 8) & 0x03;

		info = alloc_pci_root_info(min_bus, max_bus, node, link);
	}

	/* get the default node and link for leftover resources */
	reg = read_pci_config(bus, slot, 0, 0x60);
	def_node = (reg >> 8) & 0x07;
	reg = read_pci_config(bus, slot, 0, 0x64);
	def_link = (reg >> 8) & 0x03;

	memset(range, 0, sizeof(range));
	add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
	/* io port resource */
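	/*
	 * The loop below walks four register pairs at 0xc0/0xc4 + (i << 3)
	 * in northbridge function 1 (presumably the I/O-space base/limit
	 * registers described in the AMD BKDG).  The decoding used here is:
	 *   base  (0xc0 + 8*i): bits [1:0] read/write enable,
	 *                       bits [23:12] I/O base
	 *   limit (0xc4 + 8*i): bits [2:0] destination node,
	 *                       bits [5:4] destination link,
	 *                       bits [23:12] I/O limit
	 * Each enabled window is attached to its node/link root bus and
	 * subtracted from the leftover [0, 0xffff] range.
	 */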
	for (i = 0; i < 4; i++) {
		reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
		if (!(reg & 3))
			continue;

		start = reg & 0xfff000;
		reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3));
		node = reg & 0x07;
		link = (reg >> 4) & 0x03;
		end = (reg & 0xfff000) | 0xfff;

		info = find_pci_root_info(node, link);
		if (!info)
			continue; /* not found */

		printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
		       node, link, start, end);

		/* the kernel only handles 16-bit I/O ports */
		if (end > 0xffff)
			end = 0xffff;
		update_res(info, start, end, IORESOURCE_IO, 1);
		subtract_range(range, RANGE_NUM, start, end + 1);
	}
	/*
	 * add the leftover io port ranges within [0, 0xffff] to the
	 * default node/link
	 */
	info = find_pci_root_info(def_node, def_link);
	if (info) {
		for (i = 0; i < RANGE_NUM; i++) {
			if (!range[i].end)
				continue;

			update_res(info, range[i].start, range[i].end - 1,
				   IORESOURCE_IO, 1);
		}
	}

	memset(range, 0, sizeof(range));
	/* 0xfd00000000-0xffffffffff is reserved for HT */
	end = cap_resource((0xfdULL << 32) - 1);
	end++;
	add_range(range, RANGE_NUM, 0, 0, end);

	/* need to take out [0, TOM) for RAM */
	address = MSR_K8_TOP_MEM1;
	rdmsrl(address, val);
	end = (val & 0xffffff800000ULL);
	printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end >> 20);
	if (end < (1ULL << 32))
		subtract_range(range, RANGE_NUM, 0, end);

	/* get mmconfig */
	fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
	/* need to take out the mmconf range */
	if (fam10h_mmconf) {
		printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
		fam10h_mmconf_start = fam10h_mmconf->start;
		fam10h_mmconf_end = fam10h_mmconf->end;
		subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
			       fam10h_mmconf_end + 1);
	} else {
		fam10h_mmconf_start = 0;
		fam10h_mmconf_end = 0;
	}

	/* mmio resource */
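	/*
	 * The loop below walks eight register pairs at 0x80/0x84 + (i << 3)
	 * in northbridge function 1 (presumably the MMIO base/limit
	 * registers described in the AMD BKDG).  The decoding used here is:
	 *   base  (0x80 + 8*i): bits [1:0] read/write enable,
	 *                       bits [31:8] address bits [39:16] of the base
	 *   limit (0x84 + 8*i): bits [2:0] destination node,
	 *                       bits [5:4] destination link,
	 *                       bits [31:8] address bits [39:16] of the limit
	 * Each enabled window is attached to its node/link root bus, trimmed
	 * against the fam10h mmconf range, and subtracted from the leftover
	 * MMIO range.
	 */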
	for (i = 0; i < 8; i++) {
		reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
		if (!(reg & 3))
			continue;

		start = reg & 0xffffff00; /* address bits 39:16 in register bits 31:8 */
		start <<= 8;
		reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
		node = reg & 0x07;
		link = (reg >> 4) & 0x03;
		end = (reg & 0xffffff00);
		end <<= 8;
		end |= 0xffff;

		info = find_pci_root_info(node, link);

		if (!info)
			continue;

		printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
		       node, link, start, end);
		/*
		 * Some BIOS allocations overlap the fam10h mmconf range,
		 * so start and end may need to be adjusted.
		 */
		if (fam10h_mmconf_end) {
			int changed = 0;
			u64 endx = 0;

			if (start >= fam10h_mmconf_start &&
			    start <= fam10h_mmconf_end) {
				start = fam10h_mmconf_end + 1;
				changed = 1;
			}

			if (end >= fam10h_mmconf_start &&
			    end <= fam10h_mmconf_end) {
				end = fam10h_mmconf_start - 1;
				changed = 1;
			}

			if (start < fam10h_mmconf_start &&
			    end > fam10h_mmconf_end) {
				/* we got a hole */
				endx = fam10h_mmconf_start - 1;
				update_res(info, start, endx, IORESOURCE_MEM, 0);
				subtract_range(range, RANGE_NUM, start,
					       endx + 1);
				printk(KERN_CONT " ==> [%llx, %llx]",
				       start, endx);
				start = fam10h_mmconf_end + 1;
				changed = 1;
			}
			if (changed) {
				if (start <= end) {
					printk(KERN_CONT " %s [%llx, %llx]",
					       endx ? "and" : "==>",
					       start, end);
				} else {
					printk(KERN_CONT "%s\n",
					       endx ? "" : " ==> none");
					continue;
				}
			}
		}

		update_res(info, cap_resource(start), cap_resource(end),
			   IORESOURCE_MEM, 1);
		subtract_range(range, RANGE_NUM, start, end + 1);
		printk(KERN_CONT "\n");
	}

	/* need to take out [4G, TOM2) for RAM */
	/* SYS_CFG */
	address = MSR_K8_SYSCFG;
	rdmsrl(address, val);
	/* TOP_MEM2 is enabled? */
	if (val & (1 << 21)) {
		/* TOP_MEM2 */
		address = MSR_K8_TOP_MEM2;
		rdmsrl(address, val);
		end = (val & 0xffffff800000ULL);
		printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end >> 20);
		subtract_range(range, RANGE_NUM, 1ULL << 32, end);
	}

	/*
	 * Adding the leftover mmio ranges to the default node/link is
	 * tricky; just record whatever remains, from start_min up to 4G.
	 */
	info = find_pci_root_info(def_node, def_link);
	if (info) {
		for (i = 0; i < RANGE_NUM; i++) {
			if (!range[i].end)
				continue;

			update_res(info, cap_resource(range[i].start),
				   cap_resource(range[i].end - 1),
				   IORESOURCE_MEM, 1);
		}
	}

	list_for_each_entry(info, &pci_root_infos, list) {
		int busnum;
		struct pci_root_res *root_res;

		busnum = info->busn.start;
		printk(KERN_DEBUG "bus: %pR on node %x link %x\n",
		       &info->busn, info->node, info->link);
		list_for_each_entry(root_res, &info->resources, list)
			printk(KERN_DEBUG "bus: %02x %pR\n",
			       busnum, &root_res->res);
	}

	return 0;
}

#define ENABLE_CF8_EXT_CFG	(1ULL << 46)

static void __cpuinit enable_pci_io_ecs(void *unused)
{
	u64 reg;

	rdmsrl(MSR_AMD64_NB_CFG, reg);
	if (!(reg & ENABLE_CF8_EXT_CFG)) {
		reg |= ENABLE_CF8_EXT_CFG;
		wrmsrl(MSR_AMD64_NB_CFG, reg);
	}
}

static int __cpuinit amd_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata amd_cpu_notifier = {
	.notifier_call = amd_cpu_notify,
};

static void __init pci_enable_pci_io_ecs(void)
{
#ifdef CONFIG_AMD_NB
	unsigned int i, n;

	for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
		u8 bus = amd_nb_bus_dev_ranges[i].bus;
		u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
		u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;

		for (; slot < limit; ++slot) {
			u32 val = read_pci_config(bus, slot, 3, 0);

			if (!early_is_amd_nb(val))
				continue;

			val = read_pci_config(bus, slot, 3, 0x8c);
			if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
				val |= ENABLE_CF8_EXT_CFG >> 32;
				write_pci_config(bus, slot, 3, 0x8c, val);
			}
			++n;
		}
	}
#endif
}

static int __init pci_io_ecs_init(void)
{
	int cpu;

	/* assume all CPUs from fam10h onwards have IO ECS */
	if (boot_cpu_data.x86 < 0x10)
		return 0;

	/* Try the PCI method first. */
	if (early_pci_allowed())
		pci_enable_pci_io_ecs();

	register_cpu_notifier(&amd_cpu_notifier);
	for_each_online_cpu(cpu)
		amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
			       (void *)(long)cpu);
	pci_probe |= PCI_HAS_IO_ECS;

	return 0;
}

static int __init amd_postcore_init(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;

	early_fill_mp_bus_info();
	pci_io_ecs_init();

	return 0;
}

postcore_initcall(amd_postcore_init);