/*
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT	"appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/sched/stat.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/appldata.h>
#include <asm/vtimer.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "appldata.h"


#define APPLDATA_CPU_INTERVAL	10000		/* default (CPU) time for
						   sampling interval in
						   milliseconds */

#define TOD_MICRO	0x01000			/* nr. of TOD clock units
						   for 1 microsecond */

static struct platform_device *appldata_pdev;

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(struct ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(struct ctl_table *ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
	{
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_timer_handler,
	},
	{
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_interval_handler,
	},
	{ },
};

static struct ctl_table appldata_dir_table[] = {
	{
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ },
};

/*
 * Timer
 */
static struct vtimer_list appldata_timer;

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
static int appldata_timer_suspended = 0;

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);


/*
 * Ops list
 */
static DEFINE_MUTEX(appldata_ops_mutex);
static LIST_HEAD(appldata_ops_list);


/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data)
{
	queue_work(appldata_wq, (struct work_struct *) data);
}

/*
 * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
static void appldata_work_fn(struct work_struct *work)
{
	struct list_head *lh;
	struct appldata_ops *ops;

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			ops->callback(ops->data);
		}
	}
	mutex_unlock(&appldata_ops_mutex);
}
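/*
 * Note (added, informational): the virtual timer callback runs in interrupt
 * context, so appldata_timer_function() only queues appldata_work on the
 * ordered "appldata" workqueue. The actual gathering in appldata_work_fn()
 * then runs in process context, where taking appldata_ops_mutex (and
 * sleeping) is allowed.
 */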
/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
		  u16 length, char *mod_lvl)
{
	struct appldata_product_id id = {
		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
			       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
		.prod_fn    = 0xD5D3,			/* "NL" */
		.version_nr = 0xF2F6,			/* "26" */
		.release_nr = 0xF0F1,			/* "01" */
	};

	id.record_nr = record_nr;
	id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
	return appldata_asm(&id, function, (void *) buffer, length);
}
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/

#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void __appldata_vtimer_setup(int cmd)
{
	u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		appldata_timer.expires = timer_interval;
		add_virt_timer_periodic(&appldata_timer);
		appldata_timer_active = 1;
		break;
	case APPLDATA_DEL_TIMER:
		del_virt_timer(&appldata_timer);
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		break;
	case APPLDATA_MOD_TIMER:
		if (!appldata_timer_active)
			break;
		mod_virt_timer_periodic(&appldata_timer, timer_interval);
	}
}
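/*
 * Worked example (added, informational): with the default interval of
 * APPLDATA_CPU_INTERVAL = 10000 ms, __appldata_vtimer_setup() programs
 *
 *	10000 ms * 1000 us/ms * TOD_MICRO (0x1000 = 4096 TOD units/us)
 *	= 40,960,000,000 TOD clock units,
 *
 * i.e. the virtual timer expires every 10 seconds of CPU time.
 */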
/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	spin_lock(&appldata_timer_lock);
	if (buf[0] == '1')
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else if (buf[0] == '0')
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	int interval;
	char buf[16];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, "%i\n", appldata_interval);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	interval = 0;
	sscanf(buf, "%i", &interval);
	if (interval <= 0)
		return -EINVAL;

	spin_lock(&appldata_timer_lock);
	appldata_interval = interval;
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	unsigned int len;
	int rc, found;
	char buf[2];
	struct list_head *lh;

	found = 0;
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[2] == ctl) {
			found = 1;
		}
	}
	if (!found) {
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	ops = ctl->data;
	if (!try_module_get(ops->owner)) {	// protect this function
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	mutex_unlock(&appldata_ops_mutex);

	if (!*lenp || *ppos) {
		*lenp = 0;
		module_put(ops->owner);
		return 0;
	}
	if (!write) {
		strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len)) {
			module_put(ops->owner);
			return -EFAULT;
		}
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len)) {
		module_put(ops->owner);
		return -EFAULT;
	}

	mutex_lock(&appldata_ops_mutex);
	if ((buf[0] == '1') && (ops->active == 0)) {
		// protect work queue callback
		if (!try_module_get(ops->owner)) {
			mutex_unlock(&appldata_ops_mutex);
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->callback(ops->data);	// init record
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			pr_err("Starting the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
			module_put(ops->owner);
		} else
			ops->active = 1;
	} else if ((buf[0] == '0') && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0)
			pr_err("Stopping the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
		module_put(ops->owner);
	}
	mutex_unlock(&appldata_ops_mutex);
out:
	*lenp = len;
	*ppos += len;
	module_put(ops->owner);
	return 0;
}

/*************************** /proc stuff <END> *******************************/
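/*
 * Usage sketch (added, informational): the sysctl entries registered above
 * and in appldata_register_ops() are typically driven from user space, e.g.
 *
 *	echo 30000 > /proc/sys/appldata/interval	# sample every 30 s
 *	echo 1     > /proc/sys/appldata/timer		# start the sampling timer
 *	echo 1     > /proc/sys/appldata/mem		# start one gathering module
 *
 * "mem" stands for whatever a registered appldata_ops provides in ops->name;
 * it is only used as an example here.
 */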

/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
	if (ops->size > APPLDATA_MAX_REC_SIZE)
		return -EINVAL;

	ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
	if (!ops->ctl_table)
		return -ENOMEM;

	mutex_lock(&appldata_ops_mutex);
	list_add(&ops->list, &appldata_ops_list);
	mutex_unlock(&appldata_ops_mutex);

	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen   = 0;
	ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child    = &ops->ctl_table[2];

	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode     = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	ops->ctl_table[2].data = ops;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table);
	if (!ops->sysctl_header)
		goto out;
	return 0;
out:
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	kfree(ops->ctl_table);
	return -ENOMEM;
}

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(ops->ctl_table);
}
/********************** module-ops management <END> **************************/
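/*
 * Registration sketch (added, hypothetical): a data gathering module is
 * expected to fill in an appldata_ops structure and register it, roughly
 * along these lines. The "foo" record type, ID and callback are made-up
 * examples, not part of this file; only the fields shown are the ones this
 * file actually dereferences.
 *
 *	static struct appldata_foo_data foo_data;
 *
 *	static void appldata_get_foo_data(void *data)
 *	{
 *		// fill *(struct appldata_foo_data *) data
 *	}
 *
 *	static struct appldata_ops ops = {
 *		.name      = "foo",
 *		.record_nr = APPLDATA_RECORD_FOO_ID,
 *		.size      = sizeof(struct appldata_foo_data),
 *		.callback  = &appldata_get_foo_data,
 *		.data      = &foo_data,
 *		.owner     = THIS_MODULE,
 *		.mod_lvl   = {0xF0, 0xF0},	// EBCDIC "00"
 *	};
 *
 * appldata_register_ops(&ops) then creates /proc/sys/appldata/foo, and
 * appldata_unregister_ops(&ops) removes it again on module exit.
 */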

/**************************** suspend / resume *******************************/
static int appldata_freeze(struct device *dev)
{
	struct appldata_ops *ops;
	int rc;
	struct list_head *lh;

	spin_lock(&appldata_timer_lock);
	if (appldata_timer_active) {
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
		appldata_timer_suspended = 1;
	}
	spin_unlock(&appldata_timer_lock);

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
					   (unsigned long) ops->data, ops->size,
					   ops->mod_lvl);
			if (rc != 0)
				pr_err("Stopping the data collection for %s "
				       "failed with rc=%d\n", ops->name, rc);
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	return 0;
}

static int appldata_restore(struct device *dev)
{
	struct appldata_ops *ops;
	int rc;
	struct list_head *lh;

	spin_lock(&appldata_timer_lock);
	if (appldata_timer_suspended) {
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
		appldata_timer_suspended = 0;
	}
	spin_unlock(&appldata_timer_lock);

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			ops->callback(ops->data);	// init record
			rc = appldata_diag(ops->record_nr,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops->data, ops->size,
					   ops->mod_lvl);
			if (rc != 0) {
				pr_err("Starting the data collection for %s "
				       "failed with rc=%d\n", ops->name, rc);
			}
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	return 0;
}

static int appldata_thaw(struct device *dev)
{
	return appldata_restore(dev);
}

static const struct dev_pm_ops appldata_pm_ops = {
	.freeze		= appldata_freeze,
	.thaw		= appldata_thaw,
	.restore	= appldata_restore,
};

static struct platform_driver appldata_pdrv = {
	.driver = {
		.name	= "appldata",
		.pm	= &appldata_pm_ops,
	},
};
/************************* suspend / resume <END> ****************************/


/******************************* init / exit *********************************/

/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	int rc;

	init_virt_timer(&appldata_timer);
	appldata_timer.function = appldata_timer_function;
	appldata_timer.data = (unsigned long) &appldata_work;

	rc = platform_driver_register(&appldata_pdrv);
	if (rc)
		return rc;

	appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
							0);
	if (IS_ERR(appldata_pdev)) {
		rc = PTR_ERR(appldata_pdev);
		goto out_driver;
	}
	appldata_wq = alloc_ordered_workqueue("appldata", 0);
	if (!appldata_wq) {
		rc = -ENOMEM;
		goto out_device;
	}

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
	return 0;

out_device:
	platform_device_unregister(appldata_pdev);
out_driver:
	platform_driver_unregister(&appldata_pdrv);
	return rc;
}

__initcall(appldata_init);

/**************************** init / exit <END> ******************************/

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);