/*
 * /proc/stat: system-wide and per-CPU time accounting, interrupt and
 * softirq counters, context-switch count, boot time, and process stats.
 */
#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>
#include <linux/tick.h>

/*
 * Fallbacks for architectures that do not supply their own IRQ / idle
 * accounting hooks; each contributes zero to the corresponding total.
 */
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif

/*
 * Cumulative idle time of @cpu, in cputime64 units.
 *
 * Prefer the tick-independent NO_HZ accounting from
 * get_cpu_idle_time_us(); that helper returns -1ULL when NO_HZ idle
 * accounting is unavailable, in which case we fall back to the
 * tick-based cpustat counter (plus any architecture-specific idle
 * contribution).
 */
static u64 get_idle_time(int cpu)
{
	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL) {
		/* !NO_HZ so we can rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
		idle += arch_idle_time(cpu);
	} else
		/* NO_HZ path reports microseconds; convert to cputime64 */
		idle = usecs_to_cputime64(idle_time);

	return idle;
}

/*
 * Cumulative iowait time of @cpu, in cputime64 units.
 *
 * Mirrors get_idle_time(): use NO_HZ accounting when available,
 * otherwise fall back to the tick-based cpustat counter.
 */
static u64 get_iowait_time(int cpu)
{
	u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);

	if (iowait_time == -1ULL)
		/* !NO_HZ so we can rely on cpustat.iowait */
		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
	else
		/* NO_HZ path reports microseconds; convert to cputime64 */
		iowait = usecs_to_cputime64(iowait_time);

	return iowait;
}

/*
 * seq_file show routine that renders the whole of /proc/stat.
 *
 * Emits, in this fixed order (userspace ABI — do not reorder):
 *   "cpu"  aggregate times summed over all possible CPUs,
 *   "cpuN" per-CPU times for each online CPU,
 *   "intr" total interrupt count followed by per-IRQ counts,
 *   "ctxt"/"btime"/"processes"/"procs_running"/"procs_blocked",
 *   "softirq" total followed by per-softirq-type sums.
 * All cputime values are converted to clock ticks (USER_HZ) on output.
 * Always returns 0.
 */
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	u64 user, nice, system, idle, iowait, irq, softirq, steal;
	u64 guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = 0;
	guest = guest_nice = 0;
	/* boot time in seconds since the Epoch, reported as "btime" */
	getboottime(&boottime);
	jif = boottime.tv_sec;

	/*
	 * Aggregate over all *possible* CPUs so time accumulated on CPUs
	 * that have since been hot-unplugged is not lost from the totals.
	 */
	for_each_possible_cpu(i) {
		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
		idle += get_idle_time(i);
		iowait += get_iowait_time(i);
		irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
		softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
		steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
		guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
		guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	/* architecture-wide interrupts not attributed to any single CPU */
	sum += arch_irq_stat();

	seq_puts(p, "cpu ");
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
	seq_putc(p, '\n');

	/* per-CPU lines are only shown for CPUs that are currently online */
	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
		system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
		idle = get_idle_time(i);
		iowait = get_iowait_time(i);
		irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
		softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
		steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
		seq_printf(p, "cpu%d", i);
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
		seq_putc(p, '\n');
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j)
		seq_put_decimal_ull(p, ' ', kstat_irqs(j));

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
	seq_putc(p, '\n');

	return 0;
}

/*
 * open() handler for /proc/stat.
 *
 * Pre-sizes the seq_file buffer so the typical read completes in a
 * single show_stat() pass instead of the default grow-and-retry loop:
 * a base of 1 KiB plus 128 bytes per possible CPU plus room for the
 * per-IRQ counters.  On single_open() failure the buffer is freed and
 * the error is propagated.  Returns 0 on success or a negative errno.
 */
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 1024 + 128 * num_possible_cpus();
	char *buf;
	struct seq_file *m;
	int res;

	/* minimum size to display an interrupt count : 2 bytes */
	size += 2 * nr_irqs;

	/* don't ask for more than the kmalloc() max size */
	if (size > KMALLOC_MAX_SIZE)
		size = KMALLOC_MAX_SIZE;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		/* hand our pre-sized buffer to the seq_file */
		m = file->private_data;
		m->buf = buf;
		/* ksize() reports the actual slab allocation size */
		m->size = ksize(buf);
	} else
		kfree(buf);
	return res;
}

/* file_operations backed by the seq_file single_* helpers */
static const struct file_operations proc_stat_operations = {
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Register /proc/stat at boot; always succeeds from the caller's view. */
static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &proc_stat_operations);
	return 0;
}
module_init(proc_stat_init);