/*
 * Data gathering module for Linux-VM Monitor Stream, Stage 1.
 * Collects misc. OS related data (CPU utilization, running processes).
 *
 * Copyright IBM Corp. 2003, 2006
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT	"appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <asm/appldata.h>
#include <asm/smp.h>

#include "appldata.h"

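/*
 * Split a fixed-point load average value (FSHIFT fraction bits, as found
 * in avenrun[]) into its integer part and its fractional part in hundredths.
 */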
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

/*
 * OS data
 *
 * This is accessed as binary data by z/VM. If changes to it can't be avoided,
 * the structure version (product ID, see appldata_base.c) needs to be changed
 * as well and all documentation and z/VM applications using it must be
 * updated.
 *
 * The record layout is documented in the Linux for zSeries Device Drivers
 * book:
 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
 */
struct appldata_os_per_cpu {
	u32 per_cpu_user;	/* timer ticks spent in user mode   */
	u32 per_cpu_nice;	/* ... spent with modified priority */
	u32 per_cpu_system;	/* ... spent in kernel mode         */
	u32 per_cpu_idle;	/* ... spent in idle mode           */

	/* New in 2.6 */
	u32 per_cpu_irq;	/* ... spent in interrupts          */
	u32 per_cpu_softirq;	/* ... spent in softirqs            */
	u32 per_cpu_iowait;	/* ... spent while waiting for I/O  */

	/* New in modification level 01 */
	u32 per_cpu_steal;	/* ... stolen by hypervisor	    */
	u32 cpu_id;		/* number of this CPU		    */
} __attribute__((packed));

struct appldata_os_data {
	u64 timestamp;
	u32 sync_count_1;	/* after VM collected the record data, */
	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
				   same. If not, the record has been updated on
				   the Linux side while VM was collecting the
				   (possibly corrupt) data */

	u32 nr_cpus;		/* number of (virtual) CPUs        */
	u32 per_cpu_size;	/* size of the per-cpu data struct */
	u32 cpu_offset;		/* offset of the first per-cpu data struct */

	u32 nr_running;		/* number of runnable threads      */
	u32 nr_threads;		/* number of threads               */
	u32 avenrun[3];		/* average nr. of running processes during */
				/* the last 1, 5 and 15 minutes */

	/* New in 2.6 */
	u32 nr_iowait;		/* number of blocked threads
				   (waiting for I/O)               */

	/* per cpu data */
	struct appldata_os_per_cpu os_cpu[0];
} __attribute__((packed));

static struct appldata_os_data *appldata_os_data;

static struct appldata_ops ops = {
	.name	   = "os",
	.record_nr = APPLDATA_RECORD_OS_ID,
	.owner	   = THIS_MODULE,
	.mod_lvl   = {0xF0, 0xF1},		/* EBCDIC "01" */
};


/*
 * appldata_get_os_data()
 *
 * gather OS data
 */
static void appldata_get_os_data(void *data)
{
	int i, j, rc;
	struct appldata_os_data *os_data;
	unsigned int new_size;

	os_data = data;
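	/*
	 * Bump sync_count_1 before and sync_count_2 after updating the
	 * record; z/VM compares the two counters to detect a record that
	 * was modified while it was being collected.
	 */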
	os_data->sync_count_1++;

	os_data->nr_threads = nr_threads;
	os_data->nr_running = nr_running();
	os_data->nr_iowait  = nr_iowait();
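	/*
	 * The load averages are fixed-point values; adding FIXED_1/200
	 * rounds them to the nearest 1/100, the same rounding used with
	 * LOAD_INT/LOAD_FRAC (e.g. for /proc/loadavg).
	 */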
	os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
	os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
	os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);

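	/*
	 * Fill one appldata_os_per_cpu entry per online CPU, converting
	 * the cputime counters to timer ticks.
	 */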
	j = 0;
	for_each_online_cpu(i) {
		os_data->os_cpu[j].per_cpu_user =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
		os_data->os_cpu[j].per_cpu_nice =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
		os_data->os_cpu[j].per_cpu_system =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
		os_data->os_cpu[j].per_cpu_idle =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
		os_data->os_cpu[j].per_cpu_irq =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
		os_data->os_cpu[j].per_cpu_softirq =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
		os_data->os_cpu[j].per_cpu_iowait =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
		os_data->os_cpu[j].per_cpu_steal =
			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
		os_data->os_cpu[j].cpu_id = i;
		j++;
	}

	os_data->nr_cpus = j;

	new_size = sizeof(struct appldata_os_data) +
		   (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
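	/*
	 * The record size depends on the number of online CPUs. If it has
	 * changed, restart an active interval data collection with the new
	 * size and stop the collection that still uses the stale size.
	 */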
	if (ops.size != new_size) {
		if (ops.active) {
			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops.data, new_size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Starting a new OS data collection "
				       "failed with rc=%d\n", rc);

			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
					   APPLDATA_STOP_REC,
					   (unsigned long) ops.data, ops.size,
					   ops.mod_lvl);
			if (rc != 0)
				pr_err("Stopping a faulty OS data "
				       "collection failed with rc=%d\n", rc);
		}
		ops.size = new_size;
	}
	os_data->timestamp = get_tod_clock();
	os_data->sync_count_2++;
}


/*
 * appldata_os_init()
 *
 * init data, register ops
 */
static int __init appldata_os_init(void)
{
	int rc, max_size;

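	/*
	 * Size the record for all possible CPUs, so that CPU hotplug can
	 * never make the record outgrow the buffer allocated below.
	 */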
	max_size = sizeof(struct appldata_os_data) +
		   (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
	if (max_size > APPLDATA_MAX_REC_SIZE) {
		pr_err("Maximum OS record size %i exceeds the maximum "
		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
		rc = -ENOMEM;
		goto out;
	}

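	/* GFP_DMA keeps the record buffer below 2 GB (the s390 DMA zone). */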
	appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
	if (appldata_os_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
							os_cpu);

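	/*
	 * Register with the appldata base module; the callback is invoked
	 * at every sample interval to refresh the record.
	 */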
	ops.data = appldata_os_data;
	ops.callback  = &appldata_get_os_data;
	rc = appldata_register_ops(&ops);
	if (rc != 0)
		kfree(appldata_os_data);
out:
	return rc;
}

/*
 * appldata_os_exit()
 *
 * unregister ops
 */
static void __exit appldata_os_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(appldata_os_data);
}


module_init(appldata_os_init);
module_exit(appldata_os_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");