// SPDX-License-Identifier: GPL-2.0
/*
 * character device driver for reading z/VM system service records
 *
 *
 * Copyright IBM Corp. 2004, 2009
 * character device driver for reading z/VM system service records,
 * Version 1.0
 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *	      Stefan Weinhuber <wein@de.ibm.com>
 *
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>

MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 "	    Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");


/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))

/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char *buffer;
	char *current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;
	struct device *device;
	struct device *class_device;
	int autorecording;
	int autopurge;
};


/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
	.llseek  = no_llseek,
};


static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);


static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};


static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */

static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR  ARRAY_SIZE(sys_ser)

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;


static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t *logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}


static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t *logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}


static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t *logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}

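
/*
 * Overview of the IUCV event flow handled by the callbacks above (purely
 * descriptive, summarizing what the code already does):
 *
 *   vmlogrdr_open() issues iucv_path_connect() to the *LOGREC, *ACCOUNT
 *   or *SYMPTOM service and then sleeps on conn_wait_queue until either
 *   path_complete() or path_severed() is delivered.  For every record the
 *   service queues, message_pending() stores the message descriptor,
 *   bumps receive_ready and wakes readers sleeping on read_wait_queue.
 */
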

static int vmlogrdr_get_recording_class_AB(void)
{
	static const char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len,i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response,sizeof(cp_response));
	// now the parsing
	tail=strnchr(cp_response,len,'=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY",tail,3))
		return 1;
	if (!strncmp("NONE",tail,4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i=tail-cp_response; i<len; i++)
		if ( cp_response[i]=='A' || cp_response[i]=='B' )
			return 1;
	return 0;
}


static int vmlogrdr_recording(struct vmlogrdr_priv_t *logptr,
			      int action, int purge)
{

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than the strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		rc = 0;
	else
		rc = -EIO;
	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	return rc;
}

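
/*
 * For illustration only: with recording_name "EREP" and a guest that has
 * privilege class A or B, vmlogrdr_recording(logptr, 1, 1) above issues
 * the CP commands
 *
 *   RECORDING EREP PURGE  QID *
 *   RECORDING EREP ON  QID *
 *
 * while on a guest without class A or B the " QID * " part is omitted.
 */
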

static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t *logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow for blocking reads to be open
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -EOPNOTSUPP;

	/* Be sure this device hasn't already been opened */
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use) {
		spin_unlock_bh(&logptr->priv_lock);
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;

	/* start recording for this service*/
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
		if (ret)
			pr_warn("failed to start recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("iucv connection to %s failed with rc %i\n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered Interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	nonseekable_open(inode, filp);
	return 0;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	return -EIO;
}


static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t *logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
		if (ret)
			pr_warn("failed to stop recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}

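
/*
 * Layout of priv->buffer after vmlogrdr_receive_data() below has captured
 * a complete record (purely descriptive, matching the code that follows):
 *
 *   | int: record data length + sizeof(FENCE) | record data | "EOR\0" |
 *
 * A record larger than NET_BUFFER_SIZE is delivered in several chunks;
 * only the first chunk carries the leading length field and only the
 * last one is terminated by the FENCE string.
 */
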

static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char *buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}


static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t *priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}

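/*
 * A minimal user space reading sketch (illustration only, not part of the
 * driver; the /dev node name depends on the udev setup and is assumed here):
 *
 *	int fd = open("/dev/logrec", O_RDONLY);	// O_NONBLOCK is rejected
 *	int len;
 *	read(fd, &len, sizeof(len));		// record length incl. "EOR"
 *	while (len > 0)
 *		len -= read(fd, buf, sizeof(buf));
 *
 * Each record is returned as the leading int, the record data and the
 * terminating FENCE string "EOR"; a single read() call never crosses the
 * end of the currently buffered record chunk.
 */
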
static ssize_t vmlogrdr_autopurge_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge=0;
		break;
	case '1':
		priv->autopurge=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);


static ssize_t vmlogrdr_purge_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */

	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);


static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording=0;
		break;
	case '1':
		priv->autorecording=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);


static ssize_t vmlogrdr_recording_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv,0,0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv,1,0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;

}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);


static ssize_t recording_status_show(struct device_driver *driver, char *buf)
{
	static const char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}
static DRIVER_ATTR_RO(recording_status);
static struct attribute *vmlogrdr_drv_attrs[] = {
	&driver_attr_recording_status.attr,
	NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
	.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
	&vmlogrdr_drv_attr_group,
	NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
	&vmlogrdr_attr_group,
	NULL,
};

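/*
 * Illustrative sysfs usage (the paths are assumptions; they depend on the
 * iucv bus layout and the device names, and are not guaranteed here):
 *
 *   cat /sys/bus/iucv/drivers/vmlogrdr/recording_status   # CP QUERY RECORDING
 *   echo 1 > /sys/bus/iucv/devices/logrec/purge           # purge queued records
 *   echo 0 > /sys/bus/iucv/devices/logrec/autopurge       # keep records on release
 *   echo 1 > /sys/bus/iucv/devices/logrec/recording       # CP RECORDING ... ON
 *
 * autopurge and autorecording accept '0' or '1'; purge accepts only '1';
 * recording accepts '0' (off) or '1' (on).
 */
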
static const struct class vmlogrdr_class = {
	.name = "vmlogrdr_class",
};
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.groups = vmlogrdr_drv_attr_groups,
};

static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	ret = class_register(&vmlogrdr_class);
	if (ret)
		goto out_driver;
	return 0;

out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}


static void vmlogrdr_unregister_driver(void)
{
	class_unregister(&vmlogrdr_class);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}


static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = iucv_alloc_device(vmlogrdr_attr_groups, &vmlogrdr_driver,
				priv, priv->internal_name);
	if (!dev)
		return -ENOMEM;
	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}

	priv->class_device = device_create(&vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device=NULL;
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(&vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		device_unregister(priv->device);
		priv->device=NULL;
	}
	return 0;
}


static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;
	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
	if (!rc)
		return 0;

	// cleanup: cdev is not fully registered, no cdev_del here!
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}


static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}

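
/*
 * Descriptive note on the init/exit sequence implemented below (no new
 * behaviour): vmlogrdr_init() allocates the char dev region, registers the
 * IUCV handler, driver and class, allocates one page per system service and
 * registers the corresponding devices, and finally adds the cdev.
 * vmlogrdr_cleanup() undoes these steps in reverse order and is also used
 * for error unwinding during initialization.
 */
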
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}


static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
	return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);