xref: /linux/fs/proc/stat.c (revision 529d6dad5bc69de14cdd24831e2a14264e93daa4)
#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif

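/*
 * show_stat() renders the whole of /proc/stat: the aggregate and per-CPU
 * time counters, interrupt and softirq totals, boot time and process
 * statistics.
 */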
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;
	unsigned int per_irq_sum;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = guest_nice = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;

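	/*
	 * First pass: walk every possible CPU and accumulate the system-wide
	 * time counters, the total interrupt count and the per-softirq counts.
	 */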
	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		guest_nice = cputime64_add(guest_nice,
			kstat_cpu(i).cpustat.guest_nice);
		for_each_irq_nr(j) {
			sum += kstat_irqs_cpu(j, i);
		}
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

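	/*
	 * Aggregate "cpu" line.  All times are converted from cputime to
	 * clock ticks (USER_HZ units) for userspace.
	 */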
	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu "
		"%llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest),
		(unsigned long long)cputime64_to_clock_t(guest_nice));
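	/* One "cpuN" line per online CPU, same field order as the aggregate line. */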
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		guest_nice = kstat_cpu(i).cpustat.guest_nice;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
			"%llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest),
			(unsigned long long)cputime64_to_clock_t(guest_nice));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/*
	 * Walk the interrupts again for the per-IRQ columns; the counters may
	 * have been updated since "sum" was computed above, so the columns
	 * need not add up to the total exactly.
	 */
	for_each_irq_nr(j) {
		per_irq_sum = 0;
		for_each_possible_cpu(i)
			per_irq_sum += kstat_irqs_cpu(j, i);

		seq_printf(p, " %u", per_irq_sum);
	}

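	/*
	 * Context switches, boot time (seconds since the Epoch), total forks,
	 * and the counts of runnable and blocked (in I/O wait) tasks.
	 */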
	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

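	/* Softirq totals from the first pass: grand total, then one column per softirq type. */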
	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_printf(p, " %u", per_softirq_sums[i]);
	seq_printf(p, "\n");

	return 0;
}

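/*
 * Pre-size the seq_file buffer so the whole report normally fits in one
 * pass: 4 KB per 32 possible CPUs, capped at 128 KB.
 */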
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

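/* /proc/stat is read through the seq_file single_* helpers. */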
static const struct file_operations proc_stat_operations = {
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

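/* Register the /proc/stat entry at boot. */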
static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &proc_stat_operations);
	return 0;
}
module_init(proc_stat_init);