/* arch/powerpc/kernel/sysfs.c (revision dc6876a288cc6a446a6617ccfcb96082f67fa0c4) */
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>

#include "cacheinfo.h"

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#endif

static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * SMT snooze delay stuff, 64-bit only for now
 */

#ifdef CONFIG_PPC64

/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };

static ssize_t store_smt_snooze_delay(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;
	long snooze;

	ret = sscanf(buf, "%ld", &snooze);
	if (ret != 1)
		return -EINVAL;

	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;

	return count;
}

static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);

static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
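
/*
 * Usage sketch (assuming the standard sysfs cpu layout; paths are
 * illustrative, not defined in this file): on SMT-capable CPUs the
 * attribute above is registered per CPU as
 *
 *	/sys/devices/system/cpu/cpuN/smt_snooze_delay
 *
 * so "echo 200 > /sys/devices/system/cpu/cpu0/smt_snooze_delay" changes
 * cpu0's value at run time, while booting with "smt-snooze-delay=200"
 * seeds the same value for every possible CPU via the __setup hook.
 */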

#endif /* CONFIG_PPC64 */

/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */

static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__get_cpu_var(pmcs_enabled))
		return;

	__get_cpu_var(pmcs_enabled) = 1;

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);
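
/*
 * Note on the lazy enable above: the write_* helpers generated by
 * SYSFS_PMCSETUP() below call ppc_enable_pmcs() before every mtspr.
 * ppc_set_pmu_inuse() only flags the PMU as in use (cheap to repeat),
 * while the per-cpu pmcs_enabled flag ensures the platform's
 * ppc_md.enable_pmcs() hook is paid for at most once per CPU.
 */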

#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS);	\
} \
static void write_##NAME(void *val) \
{ \
	ppc_enable_pmcs(); \
	mtspr(ADDRESS, *(unsigned long *)val);	\
} \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}
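
/*
 * Expansion sketch: SYSFS_PMCSETUP(purr, SPRN_PURR) produces, roughly,
 *
 *	static void read_purr(void *val)
 *	{
 *		*(unsigned long *)val = mfspr(SPRN_PURR);
 *	}
 *
 *	static ssize_t show_purr(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		struct cpu *cpu = container_of(dev, struct cpu, dev);
 *		unsigned long val;
 *		smp_call_function_single(cpu->dev.id, read_purr, &val, 1);
 *		return sprintf(buf, "%lx\n", val);
 *	}
 *
 * plus the matching write_purr()/store_purr() pair.  The cross-CPU call
 * through smp_call_function_single() is what lets cpuN's SPR be read or
 * written from a sysfs access performed on any other CPU.
 */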


/*
 * Define all the possible registers; we only hook up the ones that are
 * implemented on the current processor.
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif
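
/*
 * "Classic" here means the mmcr0/mmcr1 + pmc1..pmc6 (plus pmc7/pmc8 on
 * 64-bit) layout shared by the IBM and G4 flavours; G4 adds mmcr2 on top
 * of that, while PA6T uses its own set with counters numbered from pmc0.
 */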


#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(purr, SPRN_PURR);
SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);

static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0600, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
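
/*
 * These show up as /sys/devices/system/cpu/cpuN/{mmcra,spurr,dscr,purr},
 * but only on CPUs advertising the matching CPU_FTR_* feature; see
 * register_cpu_online() below.  spurr is read-only (no store method).
 */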

unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);

static ssize_t show_dscr_default(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}

static ssize_t __used store_dscr_default(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	unsigned long val;
	int ret = 0;

	ret = sscanf(buf, "%lx", &val);
	if (ret != 1)
		return -EINVAL;
	dscr_default = val;

	return count;
}

static DEVICE_ATTR(dscr_default, 0600,
		show_dscr_default, store_dscr_default);

static void sysfs_create_dscr_default(void)
{
	int err = 0;
	if (cpu_has_feature(CPU_FTR_DSCR))
		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
}
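
/*
 * dscr_default is a single system-wide file at the cpu subsystem root,
 * /sys/devices/system/cpu/dscr_default, as opposed to the per-CPU dscr
 * attribute above.  A usage sketch, assuming CPU_FTR_DSCR is present
 * (the value is parsed as hex, matching the "%lx" format):
 *
 *	echo 1 > /sys/devices/system/cpu/dscr_default
 *
 * The variable is exported so the DSCR handling code elsewhere in the
 * kernel can use it as the default Data Stream Control Register value.
 */
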
#endif /* CONFIG_PPC64 */

#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#ifdef CONFIG_DEBUG_KERNEL
SYSFS_PMCSETUP(hid0, SPRN_HID0);
SYSFS_PMCSETUP(hid1, SPRN_HID1);
SYSFS_PMCSETUP(hid4, SPRN_HID4);
SYSFS_PMCSETUP(hid5, SPRN_HID5);
SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */

#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */

#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */

static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
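
/*
 * Only the first cur_cpu_spec->num_pmcs entries of this table are ever
 * registered (see register_cpu_online()), so a CPU that reports six
 * counters exposes pmc1..pmc6 and never pmc7/pmc8, even on 64-bit.
 */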

#ifdef HAS_PPC_PMC_PA6T
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */

static void __cpuinit register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

#ifdef CONFIG_PPC64
	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
			cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_create_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_online(cpu);
}
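
/*
 * register_cpu_online() runs for each CPU that is online at boot (from
 * topology_init() below) and again whenever a CPU comes online through
 * the hotplug notifier.  As an illustration (the exact contents depend
 * on CPU features and pmc_type), a 64-bit SMT-capable CPU ends up with
 * something like:
 *
 *	/sys/devices/system/cpu/cpuN/smt_snooze_delay
 *	/sys/devices/system/cpu/cpuN/mmcr0  mmcr1  mmcra
 *	/sys/devices/system/cpu/cpuN/pmc1 ... pmc<num_pmcs>
 *	/sys/devices/system/cpu/cpuN/purr  spurr  dscr
 *	/sys/devices/system/cpu/cpuN/cache/        (from cacheinfo)
 */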

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
			cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_offline(cpu);
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	if (ppc_md.cpu_probe)
		return ppc_md.cpu_probe(buf, count);

	return -EINVAL;
}

ssize_t arch_cpu_release(const char *buf, size_t count)
{
	if (ppc_md.cpu_release)
		return ppc_md.cpu_release(buf, count);

	return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
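
/*
 * arch_cpu_probe()/arch_cpu_release() back the generic
 * /sys/devices/system/cpu/probe and /sys/devices/system/cpu/release
 * files; they simply forward the written string to the platform's
 * ppc_md hooks (the string format is whatever the platform code, e.g.
 * the pseries DLPAR support, expects) and return -EINVAL when no hook
 * is provided.
 */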

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};

static DEFINE_MUTEX(cpu_mutex);

int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);

int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
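
/*
 * Sketch of how other code can hang extra attributes off every CPU using
 * the helpers above (the "foo" attribute is purely illustrative, not an
 * in-tree name):
 *
 *	static ssize_t show_foo(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", 42);
 *	}
 *	static DEVICE_ATTR(foo, 0444, show_foo, NULL);
 *
 *	cpu_add_dev_attr(&dev_attr_foo);
 *	...
 *	cpu_remove_dev_attr(&dev_attr_foo);
 *
 * cpu_add_dev_attr_group()/cpu_remove_dev_attr_group() do the same for a
 * whole struct attribute_group, which appears as a subdirectory under
 * each /sys/devices/system/cpu/cpuN/ when the group is named.
 */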


void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);

void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);


/* NUMA stuff */

#ifdef CONFIG_NUMA
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}

int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = &node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
			kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);

void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = &node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
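
/*
 * Usage sketch (dev and nid are whatever the caller has at hand): bus or
 * platform code that knows which NUMA node a device belongs to can make
 * the device visible from that node, e.g.
 *
 *	sysfs_add_device_to_node(dev, nid);
 *	...
 *	sysfs_remove_device_from_node(dev, nid);
 *
 * which creates/removes /sys/devices/system/node/nodeN/<device-name> as
 * a symlink back to the device.
 */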

#else
static void register_nodes(void)
{
	return;
}

#endif

/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
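
/*
 * physical_id reports the hardware CPU number (as set up from the device
 * tree) rather than the logical Linux CPU number, so, for example,
 * "cat /sys/devices/system/cpu/cpu4/physical_id" may legitimately print
 * something other than 4.  The file is created from topology_init()
 * alongside register_cpu(), hence the "only valid if CPU is present"
 * comment above.
 */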

static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);