1 /* 2 * arch/s390/appldata/appldata_base.c 3 * 4 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1. 5 * Exports appldata_register_ops() and appldata_unregister_ops() for the 6 * data gathering modules. 7 * 8 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. 9 * 10 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 11 */ 12 13 #include <linux/config.h> 14 #include <linux/module.h> 15 #include <linux/init.h> 16 #include <linux/slab.h> 17 #include <linux/errno.h> 18 #include <asm/uaccess.h> 19 #include <asm/io.h> 20 #include <asm/smp.h> 21 #include <linux/interrupt.h> 22 #include <linux/proc_fs.h> 23 #include <linux/page-flags.h> 24 #include <linux/swap.h> 25 #include <linux/pagemap.h> 26 #include <linux/sysctl.h> 27 #include <asm/timer.h> 28 //#include <linux/kernel_stat.h> 29 #include <linux/notifier.h> 30 #include <linux/cpu.h> 31 #include <linux/workqueue.h> 32 33 #include "appldata.h" 34 35 36 #define MY_PRINT_NAME "appldata" /* for debug messages, etc. */ 37 #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for 38 sampling interval in 39 milliseconds */ 40 41 #define TOD_MICRO 0x01000 /* nr. of TOD clock units 42 for 1 microsecond */ 43 44 /* 45 * Parameter list for DIAGNOSE X'DC' 46 */ 47 #ifndef CONFIG_64BIT 48 struct appldata_parameter_list { 49 u16 diag; /* The DIAGNOSE code X'00DC' */ 50 u8 function; /* The function code for the DIAGNOSE */ 51 u8 parlist_length; /* Length of the parameter list */ 52 u32 product_id_addr; /* Address of the 16-byte product ID */ 53 u16 reserved; 54 u16 buffer_length; /* Length of the application data buffer */ 55 u32 buffer_addr; /* Address of the application data buffer */ 56 }; 57 #else 58 struct appldata_parameter_list { 59 u16 diag; 60 u8 function; 61 u8 parlist_length; 62 u32 unused01; 63 u16 reserved; 64 u16 buffer_length; 65 u32 unused02; 66 u64 product_id_addr; 67 u64 buffer_addr; 68 }; 69 #endif /* CONFIG_64BIT */ 70 71 /* 72 * /proc entries (sysctl) 73 */ 74 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata"; 75 static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp, 76 void __user *buffer, size_t *lenp, loff_t *ppos); 77 static int appldata_interval_handler(ctl_table *ctl, int write, 78 struct file *filp, 79 void __user *buffer, 80 size_t *lenp, loff_t *ppos); 81 82 static struct ctl_table_header *appldata_sysctl_header; 83 static struct ctl_table appldata_table[] = { 84 { 85 .ctl_name = CTL_APPLDATA_TIMER, 86 .procname = "timer", 87 .mode = S_IRUGO | S_IWUSR, 88 .proc_handler = &appldata_timer_handler, 89 }, 90 { 91 .ctl_name = CTL_APPLDATA_INTERVAL, 92 .procname = "interval", 93 .mode = S_IRUGO | S_IWUSR, 94 .proc_handler = &appldata_interval_handler, 95 }, 96 { .ctl_name = 0 } 97 }; 98 99 static struct ctl_table appldata_dir_table[] = { 100 { 101 .ctl_name = CTL_APPLDATA, 102 .procname = appldata_proc_name, 103 .maxlen = 0, 104 .mode = S_IRUGO | S_IXUGO, 105 .child = appldata_table, 106 }, 107 { .ctl_name = 0 } 108 }; 109 110 /* 111 * Timer 112 */ 113 DEFINE_PER_CPU(struct vtimer_list, appldata_timer); 114 static atomic_t appldata_expire_count = ATOMIC_INIT(0); 115 116 static DEFINE_SPINLOCK(appldata_timer_lock); 117 static int appldata_interval = APPLDATA_CPU_INTERVAL; 118 static int appldata_timer_active; 119 120 /* 121 * Work queue 122 */ 123 static struct workqueue_struct *appldata_wq; 124 static void appldata_work_fn(void *data); 125 static DECLARE_WORK(appldata_work, appldata_work_fn, NULL); 126 127 128 /* 129 * Ops list 130 */ 131 static DEFINE_SPINLOCK(appldata_ops_lock); 132 static LIST_HEAD(appldata_ops_list); 133 134 135 /*************************** timer, work, DIAG *******************************/ 136 /* 137 * appldata_timer_function() 138 * 139 * schedule work and reschedule timer 140 */ 141 static void appldata_timer_function(unsigned long data, struct pt_regs *regs) 142 { 143 P_DEBUG(" -= Timer =-\n"); 144 P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(), 145 atomic_read(&appldata_expire_count)); 146 if (atomic_dec_and_test(&appldata_expire_count)) { 147 atomic_set(&appldata_expire_count, num_online_cpus()); 148 queue_work(appldata_wq, (struct work_struct *) data); 149 } 150 } 151 152 /* 153 * appldata_work_fn() 154 * 155 * call data gathering function for each (active) module 156 */ 157 static void appldata_work_fn(void *data) 158 { 159 struct list_head *lh; 160 struct appldata_ops *ops; 161 int i; 162 163 P_DEBUG(" -= Work Queue =-\n"); 164 i = 0; 165 spin_lock(&appldata_ops_lock); 166 list_for_each(lh, &appldata_ops_list) { 167 ops = list_entry(lh, struct appldata_ops, list); 168 P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n", 169 ++i, ops->active, ops->name); 170 if (ops->active == 1) { 171 ops->callback(ops->data); 172 } 173 } 174 spin_unlock(&appldata_ops_lock); 175 } 176 177 /* 178 * appldata_diag() 179 * 180 * prepare parameter list, issue DIAG 0xDC 181 */ 182 int appldata_diag(char record_nr, u16 function, unsigned long buffer, 183 u16 length, char *mod_lvl) 184 { 185 unsigned long ry; 186 struct appldata_product_id { 187 char prod_nr[7]; /* product nr. */ 188 char prod_fn[2]; /* product function */ 189 char record_nr; /* record nr. */ 190 char version_nr[2]; /* version */ 191 char release_nr[2]; /* release */ 192 char mod_lvl[2]; /* modification lvl. */ 193 } appldata_product_id = { 194 /* all strings are EBCDIC, record_nr is byte */ 195 .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, 196 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ 197 .prod_fn = {0xD5, 0xD3}, /* "NL" */ 198 .record_nr = record_nr, 199 .version_nr = {0xF2, 0xF6}, /* "26" */ 200 .release_nr = {0xF0, 0xF1}, /* "01" */ 201 .mod_lvl = {mod_lvl[0], mod_lvl[1]}, 202 }; 203 struct appldata_parameter_list appldata_parameter_list = { 204 .diag = 0xDC, 205 .function = function, 206 .parlist_length = 207 sizeof(appldata_parameter_list), 208 .buffer_length = length, 209 .product_id_addr = 210 (unsigned long) &appldata_product_id, 211 .buffer_addr = virt_to_phys((void *) buffer) 212 }; 213 214 if (!MACHINE_IS_VM) 215 return -ENOSYS; 216 ry = -1; 217 asm volatile( 218 "diag %1,%0,0xDC\n\t" 219 : "=d" (ry) 220 : "d" (&appldata_parameter_list), 221 "m" (appldata_parameter_list), 222 "m" (appldata_product_id) 223 : "cc"); 224 return (int) ry; 225 } 226 /************************ timer, work, DIAG <END> ****************************/ 227 228 229 /****************************** /proc stuff **********************************/ 230 231 /* 232 * appldata_mod_vtimer_wrap() 233 * 234 * wrapper function for mod_virt_timer(), because smp_call_function_on() 235 * accepts only one parameter. 236 */ 237 static void __appldata_mod_vtimer_wrap(void *p) { 238 struct { 239 struct vtimer_list *timer; 240 u64 expires; 241 } *args = p; 242 mod_virt_timer(args->timer, args->expires); 243 } 244 245 #define APPLDATA_ADD_TIMER 0 246 #define APPLDATA_DEL_TIMER 1 247 #define APPLDATA_MOD_TIMER 2 248 249 /* 250 * __appldata_vtimer_setup() 251 * 252 * Add, delete or modify virtual timers on all online cpus. 253 * The caller needs to get the appldata_timer_lock spinlock. 254 */ 255 static void 256 __appldata_vtimer_setup(int cmd) 257 { 258 u64 per_cpu_interval; 259 int i; 260 261 switch (cmd) { 262 case APPLDATA_ADD_TIMER: 263 if (appldata_timer_active) 264 break; 265 per_cpu_interval = (u64) (appldata_interval*1000 / 266 num_online_cpus()) * TOD_MICRO; 267 for_each_online_cpu(i) { 268 per_cpu(appldata_timer, i).expires = per_cpu_interval; 269 smp_call_function_on(add_virt_timer_periodic, 270 &per_cpu(appldata_timer, i), 271 0, 1, i); 272 } 273 appldata_timer_active = 1; 274 P_INFO("Monitoring timer started.\n"); 275 break; 276 case APPLDATA_DEL_TIMER: 277 for_each_online_cpu(i) 278 del_virt_timer(&per_cpu(appldata_timer, i)); 279 if (!appldata_timer_active) 280 break; 281 appldata_timer_active = 0; 282 atomic_set(&appldata_expire_count, num_online_cpus()); 283 P_INFO("Monitoring timer stopped.\n"); 284 break; 285 case APPLDATA_MOD_TIMER: 286 per_cpu_interval = (u64) (appldata_interval*1000 / 287 num_online_cpus()) * TOD_MICRO; 288 if (!appldata_timer_active) 289 break; 290 for_each_online_cpu(i) { 291 struct { 292 struct vtimer_list *timer; 293 u64 expires; 294 } args; 295 args.timer = &per_cpu(appldata_timer, i); 296 args.expires = per_cpu_interval; 297 smp_call_function_on(__appldata_mod_vtimer_wrap, 298 &args, 0, 1, i); 299 } 300 } 301 } 302 303 /* 304 * appldata_timer_handler() 305 * 306 * Start/Stop timer, show status of timer (0 = not active, 1 = active) 307 */ 308 static int 309 appldata_timer_handler(ctl_table *ctl, int write, struct file *filp, 310 void __user *buffer, size_t *lenp, loff_t *ppos) 311 { 312 int len; 313 char buf[2]; 314 315 if (!*lenp || *ppos) { 316 *lenp = 0; 317 return 0; 318 } 319 if (!write) { 320 len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); 321 if (len > *lenp) 322 len = *lenp; 323 if (copy_to_user(buffer, buf, len)) 324 return -EFAULT; 325 goto out; 326 } 327 len = *lenp; 328 if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) 329 return -EFAULT; 330 spin_lock(&appldata_timer_lock); 331 if (buf[0] == '1') 332 __appldata_vtimer_setup(APPLDATA_ADD_TIMER); 333 else if (buf[0] == '0') 334 __appldata_vtimer_setup(APPLDATA_DEL_TIMER); 335 spin_unlock(&appldata_timer_lock); 336 out: 337 *lenp = len; 338 *ppos += len; 339 return 0; 340 } 341 342 /* 343 * appldata_interval_handler() 344 * 345 * Set (CPU) timer interval for collection of data (in milliseconds), show 346 * current timer interval. 347 */ 348 static int 349 appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, 350 void __user *buffer, size_t *lenp, loff_t *ppos) 351 { 352 int len, interval; 353 char buf[16]; 354 355 if (!*lenp || *ppos) { 356 *lenp = 0; 357 return 0; 358 } 359 if (!write) { 360 len = sprintf(buf, "%i\n", appldata_interval); 361 if (len > *lenp) 362 len = *lenp; 363 if (copy_to_user(buffer, buf, len)) 364 return -EFAULT; 365 goto out; 366 } 367 len = *lenp; 368 if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) { 369 return -EFAULT; 370 } 371 sscanf(buf, "%i", &interval); 372 if (interval <= 0) { 373 P_ERROR("Timer CPU interval has to be > 0!\n"); 374 return -EINVAL; 375 } 376 377 spin_lock(&appldata_timer_lock); 378 appldata_interval = interval; 379 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 380 spin_unlock(&appldata_timer_lock); 381 382 P_INFO("Monitoring CPU interval set to %u milliseconds.\n", 383 interval); 384 out: 385 *lenp = len; 386 *ppos += len; 387 return 0; 388 } 389 390 /* 391 * appldata_generic_handler() 392 * 393 * Generic start/stop monitoring and DIAG, show status of 394 * monitoring (0 = not in process, 1 = in process) 395 */ 396 static int 397 appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, 398 void __user *buffer, size_t *lenp, loff_t *ppos) 399 { 400 struct appldata_ops *ops = NULL, *tmp_ops; 401 int rc, len, found; 402 char buf[2]; 403 struct list_head *lh; 404 405 found = 0; 406 spin_lock(&appldata_ops_lock); 407 list_for_each(lh, &appldata_ops_list) { 408 tmp_ops = list_entry(lh, struct appldata_ops, list); 409 if (&tmp_ops->ctl_table[2] == ctl) { 410 found = 1; 411 } 412 } 413 if (!found) { 414 spin_unlock(&appldata_ops_lock); 415 return -ENODEV; 416 } 417 ops = ctl->data; 418 if (!try_module_get(ops->owner)) { // protect this function 419 spin_unlock(&appldata_ops_lock); 420 return -ENODEV; 421 } 422 spin_unlock(&appldata_ops_lock); 423 424 if (!*lenp || *ppos) { 425 *lenp = 0; 426 module_put(ops->owner); 427 return 0; 428 } 429 if (!write) { 430 len = sprintf(buf, ops->active ? "1\n" : "0\n"); 431 if (len > *lenp) 432 len = *lenp; 433 if (copy_to_user(buffer, buf, len)) { 434 module_put(ops->owner); 435 return -EFAULT; 436 } 437 goto out; 438 } 439 len = *lenp; 440 if (copy_from_user(buf, buffer, 441 len > sizeof(buf) ? sizeof(buf) : len)) { 442 module_put(ops->owner); 443 return -EFAULT; 444 } 445 446 spin_lock(&appldata_ops_lock); 447 if ((buf[0] == '1') && (ops->active == 0)) { 448 // protect work queue callback 449 if (!try_module_get(ops->owner)) { 450 spin_unlock(&appldata_ops_lock); 451 module_put(ops->owner); 452 return -ENODEV; 453 } 454 ops->callback(ops->data); // init record 455 rc = appldata_diag(ops->record_nr, 456 APPLDATA_START_INTERVAL_REC, 457 (unsigned long) ops->data, ops->size, 458 ops->mod_lvl); 459 if (rc != 0) { 460 P_ERROR("START DIAG 0xDC for %s failed, " 461 "return code: %d\n", ops->name, rc); 462 module_put(ops->owner); 463 } else { 464 P_INFO("Monitoring %s data enabled, " 465 "DIAG 0xDC started.\n", ops->name); 466 ops->active = 1; 467 } 468 } else if ((buf[0] == '0') && (ops->active == 1)) { 469 ops->active = 0; 470 rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, 471 (unsigned long) ops->data, ops->size, 472 ops->mod_lvl); 473 if (rc != 0) { 474 P_ERROR("STOP DIAG 0xDC for %s failed, " 475 "return code: %d\n", ops->name, rc); 476 } else { 477 P_INFO("Monitoring %s data disabled, " 478 "DIAG 0xDC stopped.\n", ops->name); 479 } 480 module_put(ops->owner); 481 } 482 spin_unlock(&appldata_ops_lock); 483 out: 484 *lenp = len; 485 *ppos += len; 486 module_put(ops->owner); 487 return 0; 488 } 489 490 /*************************** /proc stuff <END> *******************************/ 491 492 493 /************************* module-ops management *****************************/ 494 /* 495 * appldata_register_ops() 496 * 497 * update ops list, register /proc/sys entries 498 */ 499 int appldata_register_ops(struct appldata_ops *ops) 500 { 501 struct list_head *lh; 502 struct appldata_ops *tmp_ops; 503 int i; 504 505 i = 0; 506 507 if ((ops->size > APPLDATA_MAX_REC_SIZE) || 508 (ops->size < 0)){ 509 P_ERROR("Invalid size of %s record = %i, maximum = %i!\n", 510 ops->name, ops->size, APPLDATA_MAX_REC_SIZE); 511 return -ENOMEM; 512 } 513 if ((ops->ctl_nr == CTL_APPLDATA) || 514 (ops->ctl_nr == CTL_APPLDATA_TIMER) || 515 (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) { 516 P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr); 517 return -EBUSY; 518 } 519 ops->ctl_table = kzalloc(4*sizeof(struct ctl_table), GFP_KERNEL); 520 if (ops->ctl_table == NULL) { 521 P_ERROR("Not enough memory for %s ctl_table!\n", ops->name); 522 return -ENOMEM; 523 } 524 525 spin_lock(&appldata_ops_lock); 526 list_for_each(lh, &appldata_ops_list) { 527 tmp_ops = list_entry(lh, struct appldata_ops, list); 528 P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n", 529 ++i, tmp_ops->name, tmp_ops->ctl_nr); 530 P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n", 531 tmp_ops->name, tmp_ops->ctl_nr, ops->name, 532 ops->ctl_nr); 533 if (strncmp(tmp_ops->name, ops->name, 534 APPLDATA_PROC_NAME_LENGTH) == 0) { 535 P_ERROR("Name \"%s\" already registered!\n", ops->name); 536 kfree(ops->ctl_table); 537 spin_unlock(&appldata_ops_lock); 538 return -EBUSY; 539 } 540 if (tmp_ops->ctl_nr == ops->ctl_nr) { 541 P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr); 542 kfree(ops->ctl_table); 543 spin_unlock(&appldata_ops_lock); 544 return -EBUSY; 545 } 546 } 547 list_add(&ops->list, &appldata_ops_list); 548 spin_unlock(&appldata_ops_lock); 549 550 ops->ctl_table[0].ctl_name = CTL_APPLDATA; 551 ops->ctl_table[0].procname = appldata_proc_name; 552 ops->ctl_table[0].maxlen = 0; 553 ops->ctl_table[0].mode = S_IRUGO | S_IXUGO; 554 ops->ctl_table[0].child = &ops->ctl_table[2]; 555 556 ops->ctl_table[1].ctl_name = 0; 557 558 ops->ctl_table[2].ctl_name = ops->ctl_nr; 559 ops->ctl_table[2].procname = ops->name; 560 ops->ctl_table[2].mode = S_IRUGO | S_IWUSR; 561 ops->ctl_table[2].proc_handler = appldata_generic_handler; 562 ops->ctl_table[2].data = ops; 563 564 ops->ctl_table[3].ctl_name = 0; 565 566 ops->sysctl_header = register_sysctl_table(ops->ctl_table,1); 567 568 P_INFO("%s-ops registered!\n", ops->name); 569 return 0; 570 } 571 572 /* 573 * appldata_unregister_ops() 574 * 575 * update ops list, unregister /proc entries, stop DIAG if necessary 576 */ 577 void appldata_unregister_ops(struct appldata_ops *ops) 578 { 579 void *table; 580 spin_lock(&appldata_ops_lock); 581 list_del(&ops->list); 582 /* at that point any incoming access will fail */ 583 table = ops->ctl_table; 584 ops->ctl_table = NULL; 585 spin_unlock(&appldata_ops_lock); 586 unregister_sysctl_table(ops->sysctl_header); 587 kfree(table); 588 P_INFO("%s-ops unregistered!\n", ops->name); 589 } 590 /********************** module-ops management <END> **************************/ 591 592 593 /******************************* init / exit *********************************/ 594 595 static void 596 appldata_online_cpu(int cpu) 597 { 598 init_virt_timer(&per_cpu(appldata_timer, cpu)); 599 per_cpu(appldata_timer, cpu).function = appldata_timer_function; 600 per_cpu(appldata_timer, cpu).data = (unsigned long) 601 &appldata_work; 602 atomic_inc(&appldata_expire_count); 603 spin_lock(&appldata_timer_lock); 604 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 605 spin_unlock(&appldata_timer_lock); 606 } 607 608 static void 609 appldata_offline_cpu(int cpu) 610 { 611 del_virt_timer(&per_cpu(appldata_timer, cpu)); 612 if (atomic_dec_and_test(&appldata_expire_count)) { 613 atomic_set(&appldata_expire_count, num_online_cpus()); 614 queue_work(appldata_wq, &appldata_work); 615 } 616 spin_lock(&appldata_timer_lock); 617 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 618 spin_unlock(&appldata_timer_lock); 619 } 620 621 static int __cpuinit 622 appldata_cpu_notify(struct notifier_block *self, 623 unsigned long action, void *hcpu) 624 { 625 switch (action) { 626 case CPU_ONLINE: 627 appldata_online_cpu((long) hcpu); 628 break; 629 #ifdef CONFIG_HOTPLUG_CPU 630 case CPU_DEAD: 631 appldata_offline_cpu((long) hcpu); 632 break; 633 #endif 634 default: 635 break; 636 } 637 return NOTIFY_OK; 638 } 639 640 static struct notifier_block __devinitdata appldata_nb = { 641 .notifier_call = appldata_cpu_notify, 642 }; 643 644 /* 645 * appldata_init() 646 * 647 * init timer, register /proc entries 648 */ 649 static int __init appldata_init(void) 650 { 651 int i; 652 653 P_DEBUG("sizeof(parameter_list) = %lu\n", 654 sizeof(struct appldata_parameter_list)); 655 656 appldata_wq = create_singlethread_workqueue("appldata"); 657 if (!appldata_wq) { 658 P_ERROR("Could not create work queue\n"); 659 return -ENOMEM; 660 } 661 662 for_each_online_cpu(i) 663 appldata_online_cpu(i); 664 665 /* Register cpu hotplug notifier */ 666 register_cpu_notifier(&appldata_nb); 667 668 appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1); 669 #ifdef MODULE 670 appldata_dir_table[0].de->owner = THIS_MODULE; 671 appldata_table[0].de->owner = THIS_MODULE; 672 appldata_table[1].de->owner = THIS_MODULE; 673 #endif 674 675 P_DEBUG("Base interface initialized.\n"); 676 return 0; 677 } 678 679 /* 680 * appldata_exit() 681 * 682 * stop timer, unregister /proc entries 683 */ 684 static void __exit appldata_exit(void) 685 { 686 struct list_head *lh; 687 struct appldata_ops *ops; 688 int rc, i; 689 690 P_DEBUG("Unloading module ...\n"); 691 /* 692 * ops list should be empty, but just in case something went wrong... 693 */ 694 spin_lock(&appldata_ops_lock); 695 list_for_each(lh, &appldata_ops_list) { 696 ops = list_entry(lh, struct appldata_ops, list); 697 rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, 698 (unsigned long) ops->data, ops->size, 699 ops->mod_lvl); 700 if (rc != 0) { 701 P_ERROR("STOP DIAG 0xDC for %s failed, " 702 "return code: %d\n", ops->name, rc); 703 } 704 } 705 spin_unlock(&appldata_ops_lock); 706 707 for_each_online_cpu(i) 708 appldata_offline_cpu(i); 709 710 appldata_timer_active = 0; 711 712 unregister_sysctl_table(appldata_sysctl_header); 713 714 destroy_workqueue(appldata_wq); 715 P_DEBUG("... module unloaded!\n"); 716 } 717 /**************************** init / exit <END> ******************************/ 718 719 720 module_init(appldata_init); 721 module_exit(appldata_exit); 722 MODULE_LICENSE("GPL"); 723 MODULE_AUTHOR("Gerald Schaefer"); 724 MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure"); 725 726 EXPORT_SYMBOL_GPL(appldata_register_ops); 727 EXPORT_SYMBOL_GPL(appldata_unregister_ops); 728 EXPORT_SYMBOL_GPL(appldata_diag); 729 730 #ifdef MODULE 731 /* 732 * Kernel symbols needed by appldata_mem and appldata_os modules. 733 * However, if this file is compiled as a module (for testing only), these 734 * symbols are not exported. In this case, we define them locally and export 735 * those. 736 */ 737 void si_swapinfo(struct sysinfo *val) 738 { 739 val->freeswap = -1ul; 740 val->totalswap = -1ul; 741 } 742 743 unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200, 744 -1 - FIXED_1/200}; 745 int nr_threads = -1; 746 747 void get_full_page_state(struct page_state *ps) 748 { 749 memset(ps, -1, sizeof(struct page_state)); 750 } 751 752 unsigned long nr_running(void) 753 { 754 return -1; 755 } 756 757 unsigned long nr_iowait(void) 758 { 759 return -1; 760 } 761 762 /*unsigned long nr_context_switches(void) 763 { 764 return -1; 765 }*/ 766 #endif /* MODULE */ 767 EXPORT_SYMBOL_GPL(si_swapinfo); 768 EXPORT_SYMBOL_GPL(nr_threads); 769 EXPORT_SYMBOL_GPL(nr_running); 770 EXPORT_SYMBOL_GPL(nr_iowait); 771 //EXPORT_SYMBOL_GPL(nr_context_switches); 772