xref: /linux/arch/sparc/kernel/sysfs.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
/* sysfs.c: Topology sysfs support code for sparc64.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>

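/* Per-cpu buffer that the sun4v hypervisor fills with MMU statistics
 * once collection has been enabled for that CPU; kept 64-byte aligned.
 */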
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

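/* Generate a read-only sysfs show routine plus its SYSDEV_ATTR for one
 * unsigned long counter in the per-cpu hv_mmu_statistics structure.
 */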
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
	return sprintf(buf, "%lu\n", p->NAME); \
} \
static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)

SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);

static struct attribute *mmu_stat_attrs[] = {
	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
	NULL,
};

static struct attribute_group mmu_stat_group = {
	.attrs = mmu_stat_attrs,
	.name = "mmu_stats",
};

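/* The sun4v mmustat hypervisor calls operate on the CPU that issues them,
 * so bind the calling task to the target CPU for the duration of func().
 */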
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
			        unsigned long (*func)(unsigned long),
				unsigned long arg)
{
	cpumask_t old_affinity = current->cpus_allowed;
	unsigned long ret;

	/* should return -EINVAL to userspace */
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed(current, old_affinity);

	return ret;
}

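/* Collection is enabled when the hypervisor reports a non-zero real
 * address for this CPU's statistics buffer.
 */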
static unsigned long read_mmustat_enable(unsigned long junk)
{
	unsigned long ra = 0;

	sun4v_mmustat_info(&ra);

	return ra != 0;
}

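/* Enable collection by handing the hypervisor the real address of this
 * CPU's buffer, or disable it by passing a zero real address.
 */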
static unsigned long write_mmustat_enable(unsigned long val)
{
	unsigned long ra, orig_ra;

	if (val)
		ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
	else
		ra = 0UL;

	return sun4v_mmustat_conf(ra, &orig_ra);
}

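/* The mmustat_enable file reads and writes the per-cpu collection state;
 * both paths hop to the target CPU via run_on_cpu().
 */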
static ssize_t show_mmustat_enable(struct sys_device *s,
				struct sysdev_attribute *attr, char *buf)
{
	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
	return sprintf(buf, "%lx\n", val);
}

static ssize_t store_mmustat_enable(struct sys_device *s,
			struct sysdev_attribute *attr, const char *buf,
			size_t count)
{
	unsigned long val, err;
	int ret = sscanf(buf, "%lu", &val);

	if (ret != 1)
		return -EINVAL;

	err = run_on_cpu(s->id, write_mmustat_enable, val);
	if (err)
		return -EIO;

	return count;
}

static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);

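/* Non-zero when the hypervisor advertises MMU statistics support; set
 * once at boot by check_mmu_stats().
 */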
static int mmu_stats_supported;

static int register_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return 0;
	sysdev_create_file(s, &attr_mmustat_enable);
	return sysfs_create_group(&s->kobj, &mmu_stat_group);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return;
	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	sysdev_remove_file(s, &attr_mmustat_enable);
}
#endif

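/* Generate show routines for unsigned long and unsigned int fields of
 * the per-cpu cpuinfo_sparc data, used for the clock and cache
 * attributes below.
 */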
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
		struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
		struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}

SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);

static struct sysdev_attribute cpu_core_attrs[] = {
	_SYSDEV_ATTR(clock_tick,          0444, show_clock_tick, NULL),
	_SYSDEV_ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
	_SYSDEV_ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
	_SYSDEV_ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
	_SYSDEV_ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
};

static DEFINE_PER_CPU(struct cpu, cpu_devices);

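/* Create the per-cpu attribute files (and the MMU statistics group, when
 * supported) for a CPU that has just come online.
 */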
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_create_file(s, &cpu_core_attrs[i]);

	register_mmu_stats(s);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	unregister_mmu_stats(s);
	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_remove_file(s, &cpu_core_attrs[i]);
}
#endif

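/* Hotplug notifier: create the per-cpu files when a CPU comes online and
 * remove them again when it goes away.
 */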
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};

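/* MMU statistics are only available when running under the sun4v
 * hypervisor; probe once at boot and record the result.
 */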
static void __init check_mmu_stats(void)
{
	unsigned long dummy1, err;

	if (tlb_type != hypervisor)
		return;

	err = sun4v_mmustat_info(&dummy1);
	if (!err)
		mmu_stats_supported = 1;
}

static void register_nodes(void)
{
#ifdef CONFIG_NUMA
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
#endif
}

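/* Register NUMA nodes and every possible CPU with the sysdev core, then
 * populate the attribute files for CPUs that are already online.
 */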
static int __init topology_init(void)
{
	int cpu;

	register_nodes();

	check_mmu_stats();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		register_cpu(c, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}

subsys_initcall(topology_init);