#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/system.h>

#include "cacheinfo.h"

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#endif

static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * SMT snooze delay stuff, 64-bit only for now
 */

#ifdef CONFIG_PPC64

/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };

static ssize_t store_smt_snooze_delay(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;
	long snooze;

	ret = sscanf(buf, "%ld", &snooze);
	if (ret != 1)
		return -EINVAL;

	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
	update_smt_snooze_delay(snooze);

	return count;
}

static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);

static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);

#endif /* CONFIG_PPC64 */

/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */

static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__get_cpu_var(pmcs_enabled))
		return;

	__get_cpu_var(pmcs_enabled) = 1;

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);

/*
 * Generate read_/write_ helpers for the SPR named by ADDRESS (run on the
 * target CPU via smp_call_function_single()), plus show_/store_ sysfs
 * accessors that parse and print the value in hex.
 */
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS); \
} \
static void write_##NAME(void *val) \
{ \
	ppc_enable_pmcs(); \
	mtspr(ADDRESS, *(unsigned long *)val); \
} \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}

/* Let's define all possible registers, we'll only hook up the ones
 * that are implemented on the current processor
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif

#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(purr, SPRN_PURR);
SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);

static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0600, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);

unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);

static ssize_t show_dscr_default(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}

static ssize_t __used store_dscr_default(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	unsigned long val;
	int ret = 0;

	ret = sscanf(buf, "%lx", &val);
	if (ret != 1)
		return -EINVAL;
	dscr_default = val;

	return count;
}

static DEVICE_ATTR(dscr_default, 0600,
		show_dscr_default, store_dscr_default);

static void sysfs_create_dscr_default(void)
{
	int err = 0;
	if (cpu_has_feature(CPU_FTR_DSCR))
		err =
			device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
}
#endif /* CONFIG_PPC64 */

#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#ifdef CONFIG_DEBUG_KERNEL
SYSFS_PMCSETUP(hid0, SPRN_HID0);
SYSFS_PMCSETUP(hid1, SPRN_HID1);
SYSFS_PMCSETUP(hid4, SPRN_HID4);
SYSFS_PMCSETUP(hid5, SPRN_HID5);
SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */

#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */

#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */

static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};

#ifdef HAS_PPC_PMC_PA6T
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */

static void __cpuinit register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_create_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_online(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs =
			ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_offline(cpu);
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	if (ppc_md.cpu_probe)
		return ppc_md.cpu_probe(buf, count);

	return -EINVAL;
}

ssize_t arch_cpu_release(const char *buf, size_t count)
{
	if (ppc_md.cpu_release)
		return ppc_md.cpu_release(buf, count);

	return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};

static DEFINE_MUTEX(cpu_mutex);

int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);

int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);


void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);

void
cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);


/* NUMA stuff */

#ifdef CONFIG_NUMA
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}

int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = &node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
			kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);

void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = &node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);

#else
static void register_nodes(void)
{
	return;
}

#endif

/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);

static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);