/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
 *
 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/timer.h>
//#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>

#include "appldata.h"


#define MY_PRINT_NAME          "appldata"      /* for debug messages, etc. */
#define APPLDATA_CPU_INTERVAL  10000           /* default (CPU) time for
                                                  sampling interval in
                                                  milliseconds */

#define TOD_MICRO              0x01000         /* nr. of TOD clock units
                                                  for 1 microsecond */
#ifndef CONFIG_ARCH_S390X

#define APPLDATA_START_INTERVAL_REC  0x00      /* Function codes for */
#define APPLDATA_STOP_REC            0x01      /* DIAG 0xDC          */
#define APPLDATA_GEN_EVENT_RECORD    0x02
#define APPLDATA_START_CONFIG_REC    0x03

#else

#define APPLDATA_START_INTERVAL_REC  0x80
#define APPLDATA_STOP_REC            0x81
#define APPLDATA_GEN_EVENT_RECORD    0x82
#define APPLDATA_START_CONFIG_REC    0x83

#endif /* CONFIG_ARCH_S390X */


/*
 * Parameter list for DIAGNOSE X'DC'
 */
#ifndef CONFIG_ARCH_S390X
struct appldata_parameter_list {
        u16 diag;               /* The DIAGNOSE code X'00DC' */
        u8  function;           /* The function code for the DIAGNOSE */
        u8  parlist_length;     /* Length of the parameter list */
        u32 product_id_addr;    /* Address of the 16-byte product ID */
        u16 reserved;
        u16 buffer_length;      /* Length of the application data buffer */
        u32 buffer_addr;        /* Address of the application data buffer */
};
#else
struct appldata_parameter_list {
        u16 diag;
        u8  function;
        u8  parlist_length;
        u32 unused01;
        u16 reserved;
        u16 buffer_length;
        u32 unused02;
        u64 product_id_addr;
        u64 buffer_addr;
};
#endif /* CONFIG_ARCH_S390X */

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
                                     struct file *filp,
                                     void __user *buffer,
                                     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
        {
                .ctl_name       = CTL_APPLDATA_TIMER,
                .procname       = "timer",
                .mode           = S_IRUGO | S_IWUSR,
                .proc_handler   = &appldata_timer_handler,
        },
        {
                .ctl_name       = CTL_APPLDATA_INTERVAL,
                .procname       = "interval",
                .mode           = S_IRUGO | S_IWUSR,
                .proc_handler   = &appldata_interval_handler,
        },
        { .ctl_name = 0 }
};

static struct ctl_table appldata_dir_table[] = {
        {
                .ctl_name       = CTL_APPLDATA,
                .procname       = appldata_proc_name,
                .maxlen         = 0,
                .mode           = S_IRUGO | S_IXUGO,
                .child          = appldata_table,
        },
        { .ctl_name = 0 }
};

/*
 * Timer
 */
DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
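
/*
 * Note on the timing scheme (as implemented below): every online cpu
 * runs a periodic virtual (cpu-time) timer whose expiry is set to
 * appldata_interval*1000/num_online_cpus() microseconds of that cpu's
 * time. Each expiration decrements appldata_expire_count; when it hits
 * zero, the counter is reset to num_online_cpus() and the gathering
 * work is queued once. The effective sampling interval is therefore
 * appldata_interval milliseconds of cpu time, summed over all online
 * cpus.
 */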

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(void *data);
static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);


/*
 * Ops list
 */
static DEFINE_SPINLOCK(appldata_ops_lock);
static LIST_HEAD(appldata_ops_list);


/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
{
        P_DEBUG(" -= Timer =-\n");
        P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
                atomic_read(&appldata_expire_count));
        if (atomic_dec_and_test(&appldata_expire_count)) {
                atomic_set(&appldata_expire_count, num_online_cpus());
                queue_work(appldata_wq, (struct work_struct *) data);
        }
}

/*
 * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
static void appldata_work_fn(void *data)
{
        struct list_head *lh;
        struct appldata_ops *ops;
        int i;

        P_DEBUG(" -= Work Queue =-\n");
        i = 0;
        spin_lock(&appldata_ops_lock);
        list_for_each(lh, &appldata_ops_list) {
                ops = list_entry(lh, struct appldata_ops, list);
                P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
                        ++i, ops->active, ops->name);
                if (ops->active == 1) {
                        ops->callback(ops->data);
                }
        }
        spin_unlock(&appldata_ops_lock);
}

/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
                         u16 length)
{
        unsigned long ry;
        struct appldata_product_id {
                char prod_nr[7];        /* product nr. */
                char prod_fn[2];        /* product function */
                char record_nr;         /* record nr. */
                char version_nr[2];     /* version */
                char release_nr[2];     /* release */
                char mod_lvl[2];        /* modification lvl. */
        } appldata_product_id = {
                /* all strings are EBCDIC, record_nr is byte */
                .prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
                               0xE7, 0xD2, 0xD9},       /* "LINUXKR" */
                .prod_fn    = {0xD5, 0xD3},             /* "NL" */
                .record_nr  = record_nr,
                .version_nr = {0xF2, 0xF6},             /* "26" */
                .release_nr = {0xF0, 0xF1},             /* "01" */
                .mod_lvl    = {0xF0, 0xF0},             /* "00" */
        };
        struct appldata_parameter_list appldata_parameter_list = {
                .diag            = 0xDC,
                .function        = function,
                .parlist_length  = sizeof(appldata_parameter_list),
                .buffer_length   = length,
                .product_id_addr = (unsigned long) &appldata_product_id,
                .buffer_addr     = virt_to_phys((void *) buffer)
        };

        if (!MACHINE_IS_VM)
                return -ENOSYS;
        ry = -1;
        asm volatile(
                "diag %1,%0,0xDC\n\t"
                : "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc");
        return (int) ry;
}
/************************ timer, work, DIAG <END> ****************************/
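
/*
 * Illustrative use of appldata_diag() (sketch only, mirroring the calls
 * made from appldata_generic_handler() below): to start interval records
 * for a record buffer "buf" of "len" bytes with record number "nr":
 *
 *      rc = appldata_diag(nr, APPLDATA_START_INTERVAL_REC,
 *                         (unsigned long) buf, len);
 *
 * and APPLDATA_STOP_REC to stop them again. A non-zero return code means
 * z/VM rejected the request; -ENOSYS is returned when not running under VM.
 */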


/****************************** /proc stuff **********************************/

/*
 * __appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer(), because smp_call_function_on()
 * accepts only one parameter.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
        struct {
                struct vtimer_list *timer;
                u64 expires;
        } *args = p;
        mod_virt_timer(args->timer, args->expires);
}

#define APPLDATA_ADD_TIMER      0
#define APPLDATA_DEL_TIMER      1
#define APPLDATA_MOD_TIMER      2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void
__appldata_vtimer_setup(int cmd)
{
        u64 per_cpu_interval;
        int i;

        switch (cmd) {
        case APPLDATA_ADD_TIMER:
                if (appldata_timer_active)
                        break;
                per_cpu_interval = (u64) (appldata_interval*1000 /
                                          num_online_cpus()) * TOD_MICRO;
                for_each_online_cpu(i) {
                        per_cpu(appldata_timer, i).expires = per_cpu_interval;
                        smp_call_function_on(add_virt_timer_periodic,
                                             &per_cpu(appldata_timer, i),
                                             0, 1, i);
                }
                appldata_timer_active = 1;
                P_INFO("Monitoring timer started.\n");
                break;
        case APPLDATA_DEL_TIMER:
                for_each_online_cpu(i)
                        del_virt_timer(&per_cpu(appldata_timer, i));
                if (!appldata_timer_active)
                        break;
                appldata_timer_active = 0;
                atomic_set(&appldata_expire_count, num_online_cpus());
                P_INFO("Monitoring timer stopped.\n");
                break;
        case APPLDATA_MOD_TIMER:
                per_cpu_interval = (u64) (appldata_interval*1000 /
                                          num_online_cpus()) * TOD_MICRO;
                if (!appldata_timer_active)
                        break;
                for_each_online_cpu(i) {
                        struct {
                                struct vtimer_list *timer;
                                u64 expires;
                        } args;
                        args.timer = &per_cpu(appldata_timer, i);
                        args.expires = per_cpu_interval;
                        smp_call_function_on(__appldata_mod_vtimer_wrap,
                                             &args, 0, 1, i);
                }
        }
}

/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int len;
        char buf[2];

        if (!*lenp || *ppos) {
                *lenp = 0;
                return 0;
        }
        if (!write) {
                len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
                        return -EFAULT;
                goto out;
        }
        len = *lenp;
        if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
                return -EFAULT;
        spin_lock(&appldata_timer_lock);
        if (buf[0] == '1')
                __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
        else if (buf[0] == '0')
                __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
        spin_unlock(&appldata_timer_lock);
out:
        *lenp = len;
        *ppos += len;
        return 0;
}

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int len, interval;
        char buf[16];

        if (!*lenp || *ppos) {
                *lenp = 0;
                return 0;
        }
        if (!write) {
                len = sprintf(buf, "%i\n", appldata_interval);
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
                        return -EFAULT;
                goto out;
        }
        len = *lenp;
        if (copy_from_user(buf, buffer,
                           len > sizeof(buf) ? sizeof(buf) : len)) {
                return -EFAULT;
        }
        sscanf(buf, "%i", &interval);
        if (interval <= 0) {
                P_ERROR("Timer CPU interval has to be > 0!\n");
                return -EINVAL;
        }

        spin_lock(&appldata_timer_lock);
        appldata_interval = interval;
        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
        spin_unlock(&appldata_timer_lock);

        P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
                interval);
out:
        *lenp = len;
        *ppos += len;
        return 0;
}
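
/*
 * Usage from user space (illustrative):
 *
 *      echo 1 > /proc/sys/appldata/timer        - start the monitoring timers
 *      echo 5000 > /proc/sys/appldata/interval  - sample every 5000 ms of cpu time
 *      echo 0 > /proc/sys/appldata/timer        - stop the monitoring timers
 */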

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct appldata_ops *ops = NULL, *tmp_ops;
        int rc, len, found;
        char buf[2];
        struct list_head *lh;

        found = 0;
        spin_lock(&appldata_ops_lock);
        list_for_each(lh, &appldata_ops_list) {
                tmp_ops = list_entry(lh, struct appldata_ops, list);
                if (&tmp_ops->ctl_table[2] == ctl) {
                        found = 1;
                }
        }
        if (!found) {
                spin_unlock(&appldata_ops_lock);
                return -ENODEV;
        }
        ops = ctl->data;
        if (!try_module_get(ops->owner)) {      // protect this function
                spin_unlock(&appldata_ops_lock);
                return -ENODEV;
        }
        spin_unlock(&appldata_ops_lock);

        if (!*lenp || *ppos) {
                *lenp = 0;
                module_put(ops->owner);
                return 0;
        }
        if (!write) {
                len = sprintf(buf, ops->active ? "1\n" : "0\n");
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len)) {
                        module_put(ops->owner);
                        return -EFAULT;
                }
                goto out;
        }
        len = *lenp;
        if (copy_from_user(buf, buffer,
                           len > sizeof(buf) ? sizeof(buf) : len)) {
                module_put(ops->owner);
                return -EFAULT;
        }

        spin_lock(&appldata_ops_lock);
        if ((buf[0] == '1') && (ops->active == 0)) {
                // protect work queue callback
                if (!try_module_get(ops->owner)) {
                        spin_unlock(&appldata_ops_lock);
                        module_put(ops->owner);
                        return -ENODEV;
                }
                ops->active = 1;
                ops->callback(ops->data);       // init record
                rc = appldata_diag(ops->record_nr,
                                   APPLDATA_START_INTERVAL_REC,
                                   (unsigned long) ops->data, ops->size);
                if (rc != 0) {
                        P_ERROR("START DIAG 0xDC for %s failed, "
                                "return code: %d\n", ops->name, rc);
                        module_put(ops->owner);
                        ops->active = 0;
                } else {
                        P_INFO("Monitoring %s data enabled, "
                                "DIAG 0xDC started.\n", ops->name);
                }
        } else if ((buf[0] == '0') && (ops->active == 1)) {
                ops->active = 0;
                rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
                                   (unsigned long) ops->data, ops->size);
                if (rc != 0) {
                        P_ERROR("STOP DIAG 0xDC for %s failed, "
                                "return code: %d\n", ops->name, rc);
                } else {
                        P_INFO("Monitoring %s data disabled, "
                                "DIAG 0xDC stopped.\n", ops->name);
                }
                module_put(ops->owner);
        }
        spin_unlock(&appldata_ops_lock);
out:
        *lenp = len;
        *ppos += len;
        module_put(ops->owner);
        return 0;
}

/*************************** /proc stuff <END> *******************************/
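
/*
 * Each registered data gathering module additionally gets its own entry
 * /proc/sys/appldata/<ops->name> (set up in appldata_register_ops() below);
 * writing '1' or '0' to it is handled by appldata_generic_handler() and
 * starts or stops the DIAG 0xDC record for that module.
 */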


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
        struct list_head *lh;
        struct appldata_ops *tmp_ops;
        int i;

        i = 0;

        if ((ops->size > APPLDATA_MAX_REC_SIZE) ||
            (ops->size < 0)) {
                P_ERROR("Invalid size of %s record = %i, maximum = %i!\n",
                        ops->name, ops->size, APPLDATA_MAX_REC_SIZE);
                return -ENOMEM;
        }
        if ((ops->ctl_nr == CTL_APPLDATA) ||
            (ops->ctl_nr == CTL_APPLDATA_TIMER) ||
            (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) {
                P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
                return -EBUSY;
        }
        ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
        if (ops->ctl_table == NULL) {
                P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
                return -ENOMEM;
        }
        memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));

        spin_lock(&appldata_ops_lock);
        list_for_each(lh, &appldata_ops_list) {
                tmp_ops = list_entry(lh, struct appldata_ops, list);
                P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
                        ++i, tmp_ops->name, tmp_ops->ctl_nr);
                P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n",
                        tmp_ops->name, tmp_ops->ctl_nr, ops->name,
                        ops->ctl_nr);
                if (strncmp(tmp_ops->name, ops->name,
                            APPLDATA_PROC_NAME_LENGTH) == 0) {
                        P_ERROR("Name \"%s\" already registered!\n", ops->name);
                        kfree(ops->ctl_table);
                        spin_unlock(&appldata_ops_lock);
                        return -EBUSY;
                }
                if (tmp_ops->ctl_nr == ops->ctl_nr) {
                        P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
                        kfree(ops->ctl_table);
                        spin_unlock(&appldata_ops_lock);
                        return -EBUSY;
                }
        }
        list_add(&ops->list, &appldata_ops_list);
        spin_unlock(&appldata_ops_lock);

        ops->ctl_table[0].ctl_name = CTL_APPLDATA;
        ops->ctl_table[0].procname = appldata_proc_name;
        ops->ctl_table[0].maxlen   = 0;
        ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
        ops->ctl_table[0].child    = &ops->ctl_table[2];

        ops->ctl_table[1].ctl_name = 0;

        ops->ctl_table[2].ctl_name     = ops->ctl_nr;
        ops->ctl_table[2].procname     = ops->name;
        ops->ctl_table[2].mode         = S_IRUGO | S_IWUSR;
        ops->ctl_table[2].proc_handler = appldata_generic_handler;
        ops->ctl_table[2].data         = ops;

        ops->ctl_table[3].ctl_name = 0;

        ops->sysctl_header = register_sysctl_table(ops->ctl_table, 1);

        P_INFO("%s-ops registered!\n", ops->name);
        return 0;
}
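
/*
 * Illustrative registration by a data gathering module (sketch only;
 * the "mem" names and constants below are placeholders, the real ones
 * live in the gathering modules and in appldata.h):
 *
 *      static struct appldata_ops ops = {
 *              .ctl_nr    = CTL_APPLDATA_MEM,        // unique sysctl id
 *              .name      = "mem",                   // -> /proc/sys/appldata/mem
 *              .record_nr = APPLDATA_RECORD_MEM_ID,  // record nr. for DIAG 0xDC
 *              .size      = sizeof(struct appldata_mem_data),
 *              .callback  = &appldata_get_mem_data,  // fills .data before DIAG
 *              .data      = &appldata_mem_data,      // the record buffer
 *              .owner     = THIS_MODULE,
 *      };
 *      rc = appldata_register_ops(&ops);
 *
 * ...and appldata_unregister_ops(&ops) on module exit.
 */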

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
        spin_lock(&appldata_ops_lock);
        unregister_sysctl_table(ops->sysctl_header);
        list_del(&ops->list);
        kfree(ops->ctl_table);
        ops->ctl_table = NULL;
        spin_unlock(&appldata_ops_lock);
        P_INFO("%s-ops unregistered!\n", ops->name);
}
/********************** module-ops management <END> **************************/


/******************************* init / exit *********************************/

static void
appldata_online_cpu(int cpu)
{
        init_virt_timer(&per_cpu(appldata_timer, cpu));
        per_cpu(appldata_timer, cpu).function = appldata_timer_function;
        per_cpu(appldata_timer, cpu).data = (unsigned long) &appldata_work;
        atomic_inc(&appldata_expire_count);
        spin_lock(&appldata_timer_lock);
        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
        spin_unlock(&appldata_timer_lock);
}

static void
appldata_offline_cpu(int cpu)
{
        del_virt_timer(&per_cpu(appldata_timer, cpu));
        if (atomic_dec_and_test(&appldata_expire_count)) {
                atomic_set(&appldata_expire_count, num_online_cpus());
                queue_work(appldata_wq, &appldata_work);
        }
        spin_lock(&appldata_timer_lock);
        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
        spin_unlock(&appldata_timer_lock);
}

static int
appldata_cpu_notify(struct notifier_block *self,
                    unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_ONLINE:
                appldata_online_cpu((long) hcpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                appldata_offline_cpu((long) hcpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata appldata_nb = {
        .notifier_call = appldata_cpu_notify,
};
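
/*
 * Note: on cpu hotplug, appldata_online_cpu()/appldata_offline_cpu()
 * adjust appldata_expire_count and re-issue APPLDATA_MOD_TIMER, so the
 * per-cpu expiry values are recalculated for the new number of online
 * cpus and the aggregate sampling interval stays at appldata_interval
 * milliseconds of cpu time.
 */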

/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
        int i;

        P_DEBUG("sizeof(parameter_list) = %lu\n",
                sizeof(struct appldata_parameter_list));

        appldata_wq = create_singlethread_workqueue("appldata");
        if (!appldata_wq) {
                P_ERROR("Could not create work queue\n");
                return -ENOMEM;
        }

        for_each_online_cpu(i)
                appldata_online_cpu(i);

        /* Register cpu hotplug notifier */
        register_cpu_notifier(&appldata_nb);

        appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
        appldata_dir_table[0].de->owner = THIS_MODULE;
        appldata_table[0].de->owner = THIS_MODULE;
        appldata_table[1].de->owner = THIS_MODULE;
#endif

        P_DEBUG("Base interface initialized.\n");
        return 0;
}

/*
 * appldata_exit()
 *
 * stop timer, unregister /proc entries
 */
static void __exit appldata_exit(void)
{
        struct list_head *lh;
        struct appldata_ops *ops;
        int rc, i;

        P_DEBUG("Unloading module ...\n");
        /*
         * ops list should be empty, but just in case something went wrong...
         */
        spin_lock(&appldata_ops_lock);
        list_for_each(lh, &appldata_ops_list) {
                ops = list_entry(lh, struct appldata_ops, list);
                rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
                                   (unsigned long) ops->data, ops->size);
                if (rc != 0) {
                        P_ERROR("STOP DIAG 0xDC for %s failed, "
                                "return code: %d\n", ops->name, rc);
                }
        }
        spin_unlock(&appldata_ops_lock);

        for_each_online_cpu(i)
                appldata_offline_cpu(i);

        appldata_timer_active = 0;

        unregister_sysctl_table(appldata_sysctl_header);

        destroy_workqueue(appldata_wq);
        P_DEBUG("... module unloaded!\n");
}
/**************************** init / exit <END> ******************************/


module_init(appldata_init);
module_exit(appldata_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);

#ifdef MODULE
/*
 * Kernel symbols needed by appldata_mem and appldata_os modules.
 * However, if this file is compiled as a module (for testing only), these
 * symbols are not exported. In this case, we define them locally and export
 * those.
 */
void si_swapinfo(struct sysinfo *val)
{
        val->freeswap = -1ul;
        val->totalswap = -1ul;
}

unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
                            -1 - FIXED_1/200};
int nr_threads = -1;

void get_full_page_state(struct page_state *ps)
{
        memset(ps, -1, sizeof(struct page_state));
}

unsigned long nr_running(void)
{
        return -1;
}

unsigned long nr_iowait(void)
{
        return -1;
}

/*unsigned long nr_context_switches(void)
{
        return -1;
}*/
#endif /* MODULE */
EXPORT_SYMBOL_GPL(si_swapinfo);
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(avenrun);
EXPORT_SYMBOL_GPL(get_full_page_state);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
//EXPORT_SYMBOL_GPL(nr_context_switches);