/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading sysfs attr reclen. Each write() to the device
 *   must specify an integral multiple (maximal 511) of reclen.
 */

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.driver = {
		.name = "vmur",
		.owner = THIS_MODULE,
	},
	.ids = ur_ids,
	.probe = ur_probe,
	.remove = ur_remove,
	.set_online = ur_set_online,
	.set_offline = ur_set_offline,
	.freeze = ur_pm_suspend,
	.int_class = IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);
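
/*
 * Illustrative sketch (not part of the driver): because the char device
 * minor number equals the devno, a virtual reader at devno 0x000c is
 * reachable through the node that ur_set_online() creates, e.g.:
 *
 *	int fd = open("/dev/vmrdr-0.0.000c", O_RDONLY);
 *	char page[4096];
 *	ssize_t n = read(fd, page, sizeof(page));	(one spool page)
 *
 * The devno and node name above are example values only.
 */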

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	atomic_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
	struct urdev *urd = dev_get_drvdata(&cdev->dev);

	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
	if (urd->open_flag) {
		pr_err("Unit record device %s is busy, %s refusing to "
		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
		return -EBUSY;
	}
	return 0;
}
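
/*
 * Reference-count walk-through (illustrative): urdev_alloc() starts the
 * urd at ref_count == 1 (the probe reference). Each opener takes another
 * reference via urdev_get_from_devno(). While ur_set_offline_force()
 * runs, it holds a temporary reference of its own, so an otherwise idle
 * online device shows ref_count == 2 there; anything above 2 means a
 * user such as ur_open() still holds the device (hence the > 2 check in
 * ur_set_offline_force() below).
 */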

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 *     on a completion event it publishes at urd->io_done. The function
 *     serialises itself on the device's mutex so that only one I/O
 *     is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, derives the result
 *     from the irb, stores it in urd->io_request_rc and complete()s the
 *     io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for freeing the channel program
 *     that alloc_chan_prog returned (see free_chan_prog).
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	if (!IS_ERR(irb)) {
		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
		      irb->scsw.cmd.count);
	}
	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}
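
/*
 * Shape of the channel program built by alloc_chan_prog(), shown for
 * rec_count == 2 (illustrative):
 *
 *	cpa[0]	WRITE_CCW_CMD	CC|SLI	count=reclen	cda -> kbuf 0
 *	cpa[1]	WRITE_CCW_CMD	CC|SLI	count=reclen	cda -> kbuf 1
 *	cpa[2]	CCW_CMD_NOOP	(ends the chain; forces CE+DE together)
 *
 * free_chan_prog() relies on the NOP's cda being zero (cpa comes from
 * kzalloc) to know where the list of data buffers ends.
 */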

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}
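
/*
 * Worked example (illustrative): diag14_read() below maps the file
 * offset to a record number for the positioning call above. Records are
 * counted from 1 and each one holds a 4K page of spool data, hence
 * *offs / PAGE_SIZE + 1 - e.g. an offset of 8192 bytes positions to
 * record 3.
 */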

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}
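
/*
 * Walk-through of verify_uri_device() below (illustrative):
 *
 *	1. diag_read_next_file_info()	peek at the top of the reader queue
 *	2. hold check			files in SYSTEM/USER hold -> -EPERM
 *	3. diag_read_file()		a first read implicitly opens the file
 *	4. diag_read_next_file_info()	re-peek: FLG_IN_USE must now be set,
 *					otherwise someone else grabbed the
 *					file first -> -EMFILE
 */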

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0; /* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}
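
/*
 * Access rules enforced by ur_open() below (summary): O_RDWR is always
 * refused; readers (DEV_CLASS_UR_I) may only be opened O_RDONLY, punches
 * and printers (DEV_CLASS_UR_O) only O_WRONLY. A device supports one
 * open at a time; further openers block on urd->wait unless O_NONBLOCK
 * is set, in which case they get -EBUSY.
 */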
706 */ 707 devno = MINOR(file_inode(file)->i_rdev); 708 709 urd = urdev_get_from_devno(devno); 710 if (!urd) { 711 rc = -ENXIO; 712 goto out; 713 } 714 715 spin_lock(&urd->open_lock); 716 while (urd->open_flag) { 717 spin_unlock(&urd->open_lock); 718 if (file->f_flags & O_NONBLOCK) { 719 rc = -EBUSY; 720 goto fail_put; 721 } 722 if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) { 723 rc = -ERESTARTSYS; 724 goto fail_put; 725 } 726 spin_lock(&urd->open_lock); 727 } 728 urd->open_flag++; 729 spin_unlock(&urd->open_lock); 730 731 TRACE("ur_open\n"); 732 733 if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) || 734 ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) { 735 TRACE("ur_open: unsupported dev class (%d)\n", urd->class); 736 rc = -EACCES; 737 goto fail_unlock; 738 } 739 740 rc = verify_device(urd); 741 if (rc) 742 goto fail_unlock; 743 744 urf = urfile_alloc(urd); 745 if (!urf) { 746 rc = -ENOMEM; 747 goto fail_unlock; 748 } 749 750 urf->dev_reclen = urd->reclen; 751 rc = get_file_reclen(urd); 752 if (rc < 0) 753 goto fail_urfile_free; 754 urf->file_reclen = rc; 755 file->private_data = urf; 756 return 0; 757 758 fail_urfile_free: 759 urfile_free(urf); 760 fail_unlock: 761 spin_lock(&urd->open_lock); 762 urd->open_flag--; 763 spin_unlock(&urd->open_lock); 764 fail_put: 765 urdev_put(urd); 766 out: 767 return rc; 768 } 769 770 static int ur_release(struct inode *inode, struct file *file) 771 { 772 struct urfile *urf = file->private_data; 773 774 TRACE("ur_release\n"); 775 spin_lock(&urf->urd->open_lock); 776 urf->urd->open_flag--; 777 spin_unlock(&urf->urd->open_lock); 778 wake_up_interruptible(&urf->urd->wait); 779 urdev_put(urf->urd); 780 urfile_free(urf); 781 return 0; 782 } 783 784 static loff_t ur_llseek(struct file *file, loff_t offset, int whence) 785 { 786 if ((file->f_flags & O_ACCMODE) != O_RDONLY) 787 return -ESPIPE; /* seek allowed only for reader */ 788 if (offset % PAGE_SIZE) 789 return -ESPIPE; /* only multiples of 4K allowed */ 790 return no_seek_end_llseek(file, offset, whence); 791 } 792 793 static const struct file_operations ur_fops = { 794 .owner = THIS_MODULE, 795 .open = ur_open, 796 .release = ur_release, 797 .read = ur_read, 798 .write = ur_write, 799 .llseek = ur_llseek, 800 }; 801 802 /* 803 * ccw_device infrastructure: 804 * ur_probe creates the struct urdev (with refcount = 1), the device 805 * attributes, sets up the interrupt handler and validates the virtual 806 * unit record device. 807 * ur_remove removes the device attributes and drops the reference to 808 * struct urdev. 809 * 810 * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized 811 * by the vmur_mutex lock. 812 * 813 * urd->char_device is used as indication that the online function has 814 * been completed successfully. 
815 */ 816 static int ur_probe(struct ccw_device *cdev) 817 { 818 struct urdev *urd; 819 int rc; 820 821 TRACE("ur_probe: cdev=%p\n", cdev); 822 823 mutex_lock(&vmur_mutex); 824 urd = urdev_alloc(cdev); 825 if (!urd) { 826 rc = -ENOMEM; 827 goto fail_unlock; 828 } 829 830 rc = ur_create_attributes(&cdev->dev); 831 if (rc) { 832 rc = -ENOMEM; 833 goto fail_urdev_put; 834 } 835 cdev->handler = ur_int_handler; 836 837 /* validate virtual unit record device */ 838 urd->class = get_urd_class(urd); 839 if (urd->class < 0) { 840 rc = urd->class; 841 goto fail_remove_attr; 842 } 843 if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { 844 rc = -EOPNOTSUPP; 845 goto fail_remove_attr; 846 } 847 spin_lock_irq(get_ccwdev_lock(cdev)); 848 dev_set_drvdata(&cdev->dev, urd); 849 spin_unlock_irq(get_ccwdev_lock(cdev)); 850 851 mutex_unlock(&vmur_mutex); 852 return 0; 853 854 fail_remove_attr: 855 ur_remove_attributes(&cdev->dev); 856 fail_urdev_put: 857 urdev_put(urd); 858 fail_unlock: 859 mutex_unlock(&vmur_mutex); 860 return rc; 861 } 862 863 static int ur_set_online(struct ccw_device *cdev) 864 { 865 struct urdev *urd; 866 int minor, major, rc; 867 char node_id[16]; 868 869 TRACE("ur_set_online: cdev=%p\n", cdev); 870 871 mutex_lock(&vmur_mutex); 872 urd = urdev_get_from_cdev(cdev); 873 if (!urd) { 874 /* ur_remove already deleted our urd */ 875 rc = -ENODEV; 876 goto fail_unlock; 877 } 878 879 if (urd->char_device) { 880 /* Another ur_set_online was faster */ 881 rc = -EBUSY; 882 goto fail_urdev_put; 883 } 884 885 minor = urd->dev_id.devno; 886 major = MAJOR(ur_first_dev_maj_min); 887 888 urd->char_device = cdev_alloc(); 889 if (!urd->char_device) { 890 rc = -ENOMEM; 891 goto fail_urdev_put; 892 } 893 894 urd->char_device->ops = &ur_fops; 895 urd->char_device->dev = MKDEV(major, minor); 896 urd->char_device->owner = ur_fops.owner; 897 898 rc = cdev_add(urd->char_device, urd->char_device->dev, 1); 899 if (rc) 900 goto fail_free_cdev; 901 if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) { 902 if (urd->class == DEV_CLASS_UR_I) 903 sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev)); 904 if (urd->class == DEV_CLASS_UR_O) 905 sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev)); 906 } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { 907 sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev)); 908 } else { 909 rc = -EOPNOTSUPP; 910 goto fail_free_cdev; 911 } 912 913 urd->device = device_create(vmur_class, &cdev->dev, 914 urd->char_device->dev, NULL, "%s", node_id); 915 if (IS_ERR(urd->device)) { 916 rc = PTR_ERR(urd->device); 917 TRACE("ur_set_online: device_create rc=%d\n", rc); 918 goto fail_free_cdev; 919 } 920 urdev_put(urd); 921 mutex_unlock(&vmur_mutex); 922 return 0; 923 924 fail_free_cdev: 925 cdev_del(urd->char_device); 926 urd->char_device = NULL; 927 fail_urdev_put: 928 urdev_put(urd); 929 fail_unlock: 930 mutex_unlock(&vmur_mutex); 931 return rc; 932 } 933 934 static int ur_set_offline_force(struct ccw_device *cdev, int force) 935 { 936 struct urdev *urd; 937 int rc; 938 939 TRACE("ur_set_offline: cdev=%p\n", cdev); 940 urd = urdev_get_from_cdev(cdev); 941 if (!urd) 942 /* ur_remove already deleted our urd */ 943 return -ENODEV; 944 if (!urd->char_device) { 945 /* Another ur_set_offline was faster */ 946 rc = -EBUSY; 947 goto fail_urdev_put; 948 } 949 if (!force && (atomic_read(&urd->ref_count) > 2)) { 950 /* There is still a user of urd (e.g. 

static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);
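
/*
 * Smoke test (illustrative, devno 0x000c assumed): after loading the
 * module, a device is brought online through the usual ccw sysfs
 * attribute, and the reclen attribute created by ur_probe() reports the
 * record length used for write CCWs:
 *
 *	# modprobe vmur
 *	# echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 *	# cat /sys/bus/ccw/devices/0.0.000c/reclen
 *	80
 */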