xref: /linux/drivers/cpufreq/cpufreq_stats.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cputime.h>

static spinlock_t cpufreq_stats_lock;

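/*
 * Per-CPU statistics bookkeeping.  time_in_state, freq_table and (when
 * CONFIG_CPU_FREQ_STAT_DETAILS is set) trans_table all live in a single
 * allocation; see cpufreq_stats_create_table().
 */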
struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};

static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};

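/*
 * Account the time spent at the current frequency since the last update
 * into time_in_state[last_index] and reset the timestamp.
 */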
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

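/* sysfs: stats/total_trans - total number of frequency transitions */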
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	return sprintf(buf, "%u\n", stat->total_trans);
}

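/*
 * sysfs: stats/time_in_state - one "<frequency> <time>" pair per line,
 * with time in clock_t (USER_HZ) units.
 */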
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			jiffies_64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
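/*
 * sysfs: stats/trans_table - two-dimensional table of transition counts,
 * rows indexed by the "from" frequency and columns by the "to" frequency.
 * Output is clamped to a single page.
 */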
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

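/* Map a frequency to its index in stat->freq_table, or -1 if not found. */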
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;
	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}

/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

	if (stat) {
		pr_debug("%s: Free stat table\n", __func__);
		kfree(stat->time_in_state);
		kfree(stat);
		per_cpu(cpufreq_stats_table, cpu) = NULL;
	}
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	if (!cpufreq_frequency_get_table(cpu))
		goto put_ref;

	if (!policy_is_shared(policy)) {
		pr_debug("%s: Free sysfs stat\n", __func__);
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	}

put_ref:
	cpufreq_cpu_put(policy);
}

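/*
 * Allocate the stats bookkeeping for policy->cpu and create the "stats"
 * sysfs group under the policy's kobject.  time_in_state, freq_table and
 * (optionally) trans_table share a single allocation.
 */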
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *current_policy;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;

	current_policy = cpufreq_cpu_get(cpu);
	if (!current_policy) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(current_policy);
	return 0;
error_out:
	cpufreq_cpu_put(current_policy);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}

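/*
 * A policy's managing CPU changed: move the stats pointer from
 * policy->last_cpu to policy->cpu.
 */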
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
			policy->last_cpu);

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
}

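/*
 * Policy notifier: create the stats table once the policy is set up
 * (CPUFREQ_NOTIFY) and re-home it on CPUFREQ_UPDATE_POLICY_CPU.
 */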
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	if (val != CPUFREQ_NOTIFY)
		return 0;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;
	ret = cpufreq_stats_create_table(policy, table);
	if (ret)
		return ret;
	return 0;
}

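/*
 * Transition notifier: on CPUFREQ_POSTCHANGE, account the time spent in
 * the old state and bump the transition counters for the new one.
 */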
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1] = ... */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

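/*
 * Hotplug notifier: drop the sysfs group while the policy is still valid
 * (CPU_DOWN_PREPARE) and free the stats memory once the CPU is dead.
 */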
static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
		cpufreq_stats_free_table(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};

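/*
 * Register the policy and transition notifiers plus the CPU hotplug
 * notifier; unwind everything if transition registration fails.
 */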
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}
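
/* Unregister all notifiers and free per-CPU stats on module unload. */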
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
				"through the sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);