// SPDX-License-Identifier: GPL-2.0
/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *          Michael Holzheu <holzheu@de.ibm.com>
 *          Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kobject.h>

#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/scsw.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attr reclen. Each write() to the
 *   device must specify an integral multiple of reclen (at most 511 records
 *   per write).
 */
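
/*
 * Userspace sketch (illustrative, not part of the driver): reading one
 * page from a virtual reader and punching records of length reclen. The
 * node names follow the "vmrdr-"/"vmpun-" scheme used in ur_set_online()
 * below; the bus IDs 0.0.000c/0.0.000d are made-up examples.
 *
 *      int fd = open("/dev/vmrdr-0.0.000c", O_RDONLY);
 *      char page[4096];
 *      ssize_t n = read(fd, page, sizeof(page)); // page-wise via diag 0x14
 *
 *      int pd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *      write(pd, buf, 10 * 80);  // 10 records, reclen == 80 for a punch
 */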

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
        { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
        { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
        { /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
        .driver = {
                .name = "vmur",
                .owner = THIS_MODULE,
        },
        .ids = ur_ids,
        .probe = ur_probe,
        .remove = ur_remove,
        .set_online = ur_set_online,
        .set_offline = ur_set_offline,
        .int_class = IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);

static void ur_uevent(struct work_struct *ws);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
        struct urdev *urd;

        urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
        if (!urd)
                return NULL;
        urd->reclen = cdev->id.driver_info;
        ccw_device_get_id(cdev, &urd->dev_id);
        mutex_init(&urd->io_mutex);
        init_waitqueue_head(&urd->wait);
        INIT_WORK(&urd->uevent_work, ur_uevent);
        spin_lock_init(&urd->open_lock);
        refcount_set(&urd->ref_count, 1);
        urd->cdev = cdev;
        get_device(&cdev->dev);
        return urd;
}

static void urdev_free(struct urdev *urd)
{
        TRACE("urdev_free: %p\n", urd);
        if (urd->cdev)
                put_device(&urd->cdev->dev);
        kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
        refcount_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
        struct urdev *urd;
        unsigned long flags;

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        urd = dev_get_drvdata(&cdev->dev);
        if (urd)
                urdev_get(urd);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
        char bus_id[16];
        struct ccw_device *cdev;
        struct urdev *urd;

        sprintf(bus_id, "0.0.%04x", devno);
        cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
        if (!cdev)
                return NULL;
        urd = urdev_get_from_cdev(cdev);
        put_device(&cdev->dev);
        return urd;
}

static void urdev_put(struct urdev *urd)
{
        if (refcount_dec_and_test(&urd->ref_count))
                urdev_free(urd);
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, derives the I/O result
 * from the irb and stores it in urd->io_request_rc, then complete()s the
 * io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
        struct ccw1 *ptr = cpa;

        while (ptr->cda) {
                kfree(phys_to_virt(ptr->cda));
                ptr++;
        }
        kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
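
/*
 * Illustrative layout of the resulting channel program for rec_count == 2,
 * matching the code below (kbufN are the per-record kernel buffers):
 *
 *      cpa[0]: WRITE_CCW_CMD, CCW_FLAG_CC | CCW_FLAG_SLI, count=reclen, cda=kbuf0
 *      cpa[1]: WRITE_CCW_CMD, CCW_FLAG_CC | CCW_FLAG_SLI, count=reclen, cda=kbuf1
 *      cpa[2]: CCW_CMD_NOOP (terminates the chain; forces CE+DE together)
 */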

static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
                                    int reclen)
{
        struct ccw1 *cpa;
        void *kbuf;
        int i;

        TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

        /*
         * We chain a NOP onto the writes to force CE+DE together.
         * That means we allocate room for CCWs to cover rec_count records
         * plus a NOP.
         */
        cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
                      GFP_KERNEL | GFP_DMA);
        if (!cpa)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < rec_count; i++) {
                cpa[i].cmd_code = WRITE_CCW_CMD;
                cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
                cpa[i].count = reclen;
                kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
                if (!kbuf) {
                        free_chan_prog(cpa);
                        return ERR_PTR(-ENOMEM);
                }
                cpa[i].cda = (u32)virt_to_phys(kbuf);
                if (copy_from_user(kbuf, ubuf, reclen)) {
                        free_chan_prog(cpa);
                        return ERR_PTR(-EFAULT);
                }
                ubuf += reclen;
        }
        /* The following NOP CCW forces CE+DE to be presented together */
        cpa[i].cmd_code = CCW_CMD_NOOP;
        return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
        int rc;
        struct ccw_device *cdev = urd->cdev;
        DECLARE_COMPLETION_ONSTACK(event);

        TRACE("do_ur_io: cpa=%p\n", cpa);

        rc = mutex_lock_interruptible(&urd->io_mutex);
        if (rc)
                return rc;

        urd->io_done = &event;

        spin_lock_irq(get_ccwdev_lock(cdev));
        rc = ccw_device_start(cdev, cpa, 1, 0, 0);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
        if (rc)
                goto out;

        wait_for_completion(&event);
        TRACE("do_ur_io: I/O complete\n");
        rc = 0;

out:
        mutex_unlock(&urd->io_mutex);
        return rc;
}

static void ur_uevent(struct work_struct *ws)
{
        struct urdev *urd = container_of(ws, struct urdev, uevent_work);
        char *envp[] = {
                "EVENT=unsol_de",       /* Unsolicited device-end interrupt */
                NULL
        };

        kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp);
        urdev_put(urd);
}
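
/*
 * Example udev rule (illustrative; the rule text and helper path are
 * hypothetical) reacting to the uevent emitted above when the device
 * signals an unsolicited device end:
 *
 *      ACTION=="change", SUBSYSTEM=="ccw", ENV{EVENT}=="unsol_de", \
 *              RUN+="/usr/local/sbin/vmur-ready"
 */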

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
                           struct irb *irb)
{
        struct urdev *urd;

        if (!IS_ERR(irb)) {
                TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
                      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
                      irb->scsw.cmd.count);
        }
        urd = dev_get_drvdata(&cdev->dev);
        if (!intparm) {
                TRACE("ur_int_handler: unsolicited interrupt\n");

                if (scsw_dstat(&irb->scsw) & DEV_STAT_DEV_END) {
                        /*
                         * Userspace might be interested in a transition to
                         * device-ready state.
                         */
                        urdev_get(urd);
                        schedule_work(&urd->uevent_work);
                }
                return;
        }
        /* On special conditions irb is an error pointer */
        if (IS_ERR(irb))
                urd->io_request_rc = PTR_ERR(irb);
        else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                urd->io_request_rc = 0;
        else
                urd->io_request_rc = -EIO;

        complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct urdev *urd;
        int rc;

        urd = urdev_get_from_cdev(to_ccwdev(dev));
        if (!urd)
                return -ENODEV;
        rc = sprintf(buf, "%zu\n", urd->reclen);
        urdev_put(urd);
        return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
        return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
        device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
        static struct diag210 ur_diag210;
        int cc;

        ur_diag210.vrdcdvno = urd->dev_id.devno;
        ur_diag210.vrdclen = sizeof(struct diag210);

        cc = diag210(&ur_diag210);
        switch (cc) {
        case 0:
                return -EOPNOTSUPP; /* real device: not supported by vmur */
        case 2:
                return ur_diag210.vrdcvcla; /* virtual device class */
        case 3:
                return -ENODEV;
        default:
                return -EIO;
        }
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
        struct urfile *urf;

        urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
        if (!urf)
                return NULL;
        urf->urd = urd;

        TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
              urf->dev_reclen);

        return urf;
}

static void urfile_free(struct urfile *urf)
{
        TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
        kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
                        size_t count, size_t reclen, loff_t *ppos)
{
        struct ccw1 *cpa;
        int rc;

        cpa = alloc_chan_prog(udata, count / reclen, reclen);
        if (IS_ERR(cpa))
                return PTR_ERR(cpa);

        rc = do_ur_io(urd, cpa);
        if (rc)
                goto fail_kfree_cpa;

        if (urd->io_request_rc) {
                rc = urd->io_request_rc;
                goto fail_kfree_cpa;
        }
        *ppos += count;
        rc = count;

fail_kfree_cpa:
        free_chan_prog(cpa);
        return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
                        size_t count, loff_t *ppos)
{
        struct urfile *urf = file->private_data;

        TRACE("ur_write: count=%zu\n", count);

        if (count == 0)
                return 0;

        if (count % urf->dev_reclen)
                return -EINVAL; /* count must be a multiple of reclen */

        if (count > urf->dev_reclen * MAX_RECS_PER_IO)
                count = urf->dev_reclen * MAX_RECS_PER_IO;

        return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
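
/*
 * Note that ur_write() caps a single request at MAX_RECS_PER_IO records,
 * so callers see short writes for larger buffers. Userspace sketch
 * (illustrative) that handles this:
 *
 *      while (len) {
 *              ssize_t n = write(fd, p, len);  // len: multiple of reclen
 *              if (n < 0)
 *                      break;                  // inspect errno
 *              p += n;
 *              len -= n;
 *      }
 */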

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *      record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
        int cc;

        cc = diag14(record, devno, 0x28);
        switch (cc) {
        case 0:
                return 0;
        case 2:
                return -ENOMEDIUM;
        case 3:
                return -ENODATA; /* position beyond end of file */
        default:
                return -EIO;
        }
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *       reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
        int cc;

        cc = diag14((unsigned long) buf, devno, 0x00);
        switch (cc) {
        case 0:
                return 0;
        case 1:
                return -ENODATA;
        case 2:
                return -ENOMEDIUM;
        default:
                return -EIO;
        }
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
                           loff_t *offs)
{
        size_t len, copied, res;
        char *buf;
        int rc;
        u16 reclen;
        struct urdev *urd;

        urd = ((struct urfile *) file->private_data)->urd;
        reclen = ((struct urfile *) file->private_data)->file_reclen;

        rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
        if (rc == -ENODATA)
                return 0;
        if (rc)
                return rc;

        len = min((size_t) PAGE_SIZE, count);
        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        copied = 0;
        res = (size_t) (*offs % PAGE_SIZE);
        do {
                rc = diag_read_file(urd->dev_id.devno, buf);
                if (rc == -ENODATA)
                        break;
                if (rc)
                        goto fail;
                if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
                        *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
                len = min(count - copied, PAGE_SIZE - res);
                if (copy_to_user(ubuf + copied, buf + res, len)) {
                        rc = -EFAULT;
                        goto fail;
                }
                res = 0;
                copied += len;
        } while (copied != count);

        *offs += copied;
        rc = copied;
fail:
        free_page((unsigned long) buf);
        return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
                       loff_t *offs)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

        if (count == 0)
                return 0;

        urd = ((struct urfile *) file->private_data)->urd;
        rc = mutex_lock_interruptible(&urd->io_mutex);
        if (rc)
                return rc;
        rc = diag14_read(file, ubuf, count, offs);
        mutex_unlock(&urd->io_mutex);
        return rc;
}
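
/*
 * The reader is consumed page-wise and seeking is only allowed in 4K
 * steps (see ur_llseek below), so a whole spool file can be drained
 * with e.g. (illustrative; the node name is a made-up example):
 *
 *      dd if=/dev/vmrdr-0.0.000c of=spoolfile bs=4096
 */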

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
        int cc;

        cc = diag14((unsigned long) buf, spid, 0xfff);
        switch (cc) {
        case 0:
                return 0;
        default:
                return -ENODATA;
        }
}

static int verify_uri_device(struct urdev *urd)
{
        struct file_control_block *fcb;
        char *buf;
        int rc;

        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
        if (!fcb)
                return -ENOMEM;

        /* check for empty reader device (beginning of chain) */
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free_fcb;

        /* if file is in hold status, we do not read it */
        if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
                rc = -EPERM;
                goto fail_free_fcb;
        }

        /* open file on virtual reader */
        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!buf) {
                rc = -ENOMEM;
                goto fail_free_fcb;
        }
        rc = diag_read_file(urd->dev_id.devno, buf);
        if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
                goto fail_free_buf;

        /* check if the file on top of the queue is open now */
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free_buf;
        if (!(fcb->file_stat & FLG_IN_USE)) {
                rc = -EMFILE;
                goto fail_free_buf;
        }
        rc = 0;

fail_free_buf:
        free_page((unsigned long) buf);
fail_free_fcb:
        kfree(fcb);
        return rc;
}

static int verify_device(struct urdev *urd)
{
        switch (urd->class) {
        case DEV_CLASS_UR_O:
                return 0;       /* no check needed here */
        case DEV_CLASS_UR_I:
                return verify_uri_device(urd);
        default:
                return -EOPNOTSUPP;
        }
}

static int get_uri_file_reclen(struct urdev *urd)
{
        struct file_control_block *fcb;
        int rc;

        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
        if (!fcb)
                return -ENOMEM;
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free;
        if (fcb->file_stat & FLG_CP_DUMP)
                rc = 0;
        else
                rc = fcb->rec_len;

fail_free:
        kfree(fcb);
        return rc;
}

static int get_file_reclen(struct urdev *urd)
{
        switch (urd->class) {
        case DEV_CLASS_UR_O:
                return 0;
        case DEV_CLASS_UR_I:
                return get_uri_file_reclen(urd);
        default:
                return -EOPNOTSUPP;
        }
}
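
/*
 * A spool file in HOLD status makes open() fail with -EPERM (see
 * verify_uri_device() above). On z/VM such a file can typically be
 * released with a CP command along the lines of "CHANGE RDR ALL NOHOLD"
 * (illustrative; consult the CP command reference for exact syntax).
 */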

static int ur_open(struct inode *inode, struct file *file)
{
        u16 devno;
        struct urdev *urd;
        struct urfile *urf;
        unsigned short accmode;
        int rc;

        accmode = file->f_flags & O_ACCMODE;

        if (accmode == O_RDWR)
                return -EACCES;
        /*
         * We treat the minor number as the devno of the ur device
         * to find in the driver tree.
         */
        devno = iminor(file_inode(file));

        urd = urdev_get_from_devno(devno);
        if (!urd) {
                rc = -ENXIO;
                goto out;
        }

        spin_lock(&urd->open_lock);
        while (urd->open_flag) {
                spin_unlock(&urd->open_lock);
                if (file->f_flags & O_NONBLOCK) {
                        rc = -EBUSY;
                        goto fail_put;
                }
                if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
                        rc = -ERESTARTSYS;
                        goto fail_put;
                }
                spin_lock(&urd->open_lock);
        }
        urd->open_flag++;
        spin_unlock(&urd->open_lock);

        TRACE("ur_open\n");

        if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
            ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
                TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
                rc = -EACCES;
                goto fail_unlock;
        }

        rc = verify_device(urd);
        if (rc)
                goto fail_unlock;

        urf = urfile_alloc(urd);
        if (!urf) {
                rc = -ENOMEM;
                goto fail_unlock;
        }

        urf->dev_reclen = urd->reclen;
        rc = get_file_reclen(urd);
        if (rc < 0)
                goto fail_urfile_free;
        urf->file_reclen = rc;
        file->private_data = urf;
        return 0;

fail_urfile_free:
        urfile_free(urf);
fail_unlock:
        spin_lock(&urd->open_lock);
        urd->open_flag--;
        spin_unlock(&urd->open_lock);
fail_put:
        urdev_put(urd);
out:
        return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
        struct urfile *urf = file->private_data;

        TRACE("ur_release\n");
        spin_lock(&urf->urd->open_lock);
        urf->urd->open_flag--;
        spin_unlock(&urf->urd->open_lock);
        wake_up_interruptible(&urf->urd->wait);
        urdev_put(urf->urd);
        urfile_free(urf);
        return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
        if ((file->f_flags & O_ACCMODE) != O_RDONLY)
                return -ESPIPE; /* seek allowed only for reader */
        if (offset % PAGE_SIZE)
                return -ESPIPE; /* only multiples of 4K allowed */
        return no_seek_end_llseek(file, offset, whence);
}

static const struct file_operations ur_fops = {
        .owner = THIS_MODULE,
        .open = ur_open,
        .release = ur_release,
        .read = ur_read,
        .write = ur_write,
        .llseek = ur_llseek,
};
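
/*
 * ur_open() implements an exclusive-open protocol per device: a second
 * opener sleeps until release, or fails fast with O_NONBLOCK. Userspace
 * sketch (illustrative; the node name is a made-up example):
 *
 *      fd = open("/dev/vmrdr-0.0.000c", O_RDONLY | O_NONBLOCK);
 *      if (fd < 0 && errno == EBUSY)
 *              ;       // device is already open elsewhere
 */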

/*
 * ccw_device infrastructure:
 * ur_probe creates the struct urdev (with refcount = 1), the device
 * attributes, sets up the interrupt handler and validates the virtual
 * unit record device.
 * ur_remove removes the device attributes and drops the reference to
 * struct urdev.
 *
 * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 * by the vmur_mutex lock.
 *
 * urd->char_device is used as indication that the online function has
 * been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_probe: cdev=%p\n", cdev);

        mutex_lock(&vmur_mutex);
        urd = urdev_alloc(cdev);
        if (!urd) {
                rc = -ENOMEM;
                goto fail_unlock;
        }

        rc = ur_create_attributes(&cdev->dev);
        if (rc) {
                rc = -ENOMEM;
                goto fail_urdev_put;
        }

        /* validate virtual unit record device */
        urd->class = get_urd_class(urd);
        if (urd->class < 0) {
                rc = urd->class;
                goto fail_remove_attr;
        }
        if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
                rc = -EOPNOTSUPP;
                goto fail_remove_attr;
        }
        spin_lock_irq(get_ccwdev_lock(cdev));
        dev_set_drvdata(&cdev->dev, urd);
        cdev->handler = ur_int_handler;
        spin_unlock_irq(get_ccwdev_lock(cdev));

        mutex_unlock(&vmur_mutex);
        return 0;

fail_remove_attr:
        ur_remove_attributes(&cdev->dev);
fail_urdev_put:
        urdev_put(urd);
fail_unlock:
        mutex_unlock(&vmur_mutex);
        return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
        struct urdev *urd;
        int minor, major, rc;
        char node_id[16];

        TRACE("ur_set_online: cdev=%p\n", cdev);

        mutex_lock(&vmur_mutex);
        urd = urdev_get_from_cdev(cdev);
        if (!urd) {
                /* ur_remove already deleted our urd */
                rc = -ENODEV;
                goto fail_unlock;
        }

        if (urd->char_device) {
                /* Another ur_set_online was faster */
                rc = -EBUSY;
                goto fail_urdev_put;
        }

        minor = urd->dev_id.devno;
        major = MAJOR(ur_first_dev_maj_min);

        urd->char_device = cdev_alloc();
        if (!urd->char_device) {
                rc = -ENOMEM;
                goto fail_urdev_put;
        }

        urd->char_device->ops = &ur_fops;
        urd->char_device->owner = ur_fops.owner;

        rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
        if (rc)
                goto fail_free_cdev;
        if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
                if (urd->class == DEV_CLASS_UR_I)
                        sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
                if (urd->class == DEV_CLASS_UR_O)
                        sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
        } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
                sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
        } else {
                rc = -EOPNOTSUPP;
                goto fail_free_cdev;
        }

        urd->device = device_create(vmur_class, &cdev->dev,
                                    urd->char_device->dev, NULL, "%s", node_id);
        if (IS_ERR(urd->device)) {
                rc = PTR_ERR(urd->device);
                TRACE("ur_set_online: device_create rc=%d\n", rc);
                goto fail_free_cdev;
        }
        urdev_put(urd);
        mutex_unlock(&vmur_mutex);
        return 0;

fail_free_cdev:
        cdev_del(urd->char_device);
        urd->char_device = NULL;
fail_urdev_put:
        urdev_put(urd);
fail_unlock:
        mutex_unlock(&vmur_mutex);
        return rc;
}
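
/*
 * Devices are typically brought online through the generic ccw sysfs
 * attribute, which ends up in ur_set_online() (illustrative; the bus ID
 * is a made-up example):
 *
 *      echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 */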

static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_set_offline: cdev=%p\n", cdev);
        urd = urdev_get_from_cdev(cdev);
        if (!urd)
                /* ur_remove already deleted our urd */
                return -ENODEV;
        if (!urd->char_device) {
                /* Another ur_set_offline was faster */
                rc = -EBUSY;
                goto fail_urdev_put;
        }
        if (!force && (refcount_read(&urd->ref_count) > 2)) {
                /* There is still a user of urd (e.g. ur_open) */
                TRACE("ur_set_offline: BUSY\n");
                rc = -EBUSY;
                goto fail_urdev_put;
        }
        if (cancel_work_sync(&urd->uevent_work)) {
                /* Work not run yet - need to release reference here */
                urdev_put(urd);
        }
        device_destroy(vmur_class, urd->char_device->dev);
        cdev_del(urd->char_device);
        urd->char_device = NULL;
        rc = 0;

fail_urdev_put:
        urdev_put(urd);
        return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
        int rc;

        mutex_lock(&vmur_mutex);
        rc = ur_set_offline_force(cdev, 0);
        mutex_unlock(&vmur_mutex);
        return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
        unsigned long flags;

        TRACE("ur_remove\n");

        mutex_lock(&vmur_mutex);

        if (cdev->online)
                ur_set_offline_force(cdev, 1);
        ur_remove_attributes(&cdev->dev);

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        urdev_put(dev_get_drvdata(&cdev->dev));
        dev_set_drvdata(&cdev->dev, NULL);
        cdev->handler = NULL;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
        int rc;
        dev_t dev;

        if (!MACHINE_IS_VM) {
                pr_err("The %s cannot be loaded without z/VM\n",
                       ur_banner);
                return -ENODEV;
        }

        vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
        if (!vmur_dbf)
                return -ENOMEM;
        rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
        if (rc)
                goto fail_free_dbf;

        debug_set_level(vmur_dbf, 6);

        vmur_class = class_create("vmur");
        if (IS_ERR(vmur_class)) {
                rc = PTR_ERR(vmur_class);
                goto fail_free_dbf;
        }

        rc = ccw_driver_register(&ur_driver);
        if (rc)
                goto fail_class_destroy;

        rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
        if (rc) {
                pr_err("Kernel function alloc_chrdev_region failed with "
                       "error code %d\n", rc);
                goto fail_unregister_driver;
        }
        ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

        pr_info("%s loaded.\n", ur_banner);
        return 0;

fail_unregister_driver:
        ccw_driver_unregister(&ur_driver);
fail_class_destroy:
        class_destroy(vmur_class);
fail_free_dbf:
        debug_unregister(vmur_dbf);
        return rc;
}

static void __exit ur_exit(void)
{
        unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
        ccw_driver_unregister(&ur_driver);
        class_destroy(vmur_class);
        debug_unregister(vmur_dbf);
        pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);