// SPDX-License-Identifier: GPL-2.0
/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *          Michael Holzheu <holzheu@de.ibm.com>
 *          Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attribute reclen. Each write() to the
 *   device must specify an integral multiple (at most 511) of reclen.
 */
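
/*
 * Illustrative userspace sketch (not part of the driver): it shows the
 * access pattern described above. The device node names follow the
 * vmrdr-/vmpun- scheme created in ur_set_online below; the device
 * numbers and record counts are assumptions for the example only:
 *
 *	int rdr = open("/dev/vmrdr-0.0.000c", O_RDONLY);
 *	char page[4096];
 *	ssize_t n = read(rdr, page, sizeof(page)); // spool data, page-wise
 *
 *	int pun = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *	char recs[2 * 80];              // two records of reclen 80
 *	write(pun, recs, sizeof(recs)); // must be a multiple of reclen
 */
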
static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
        { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
        { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
        { /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
        .driver = {
                .name   = "vmur",
                .owner  = THIS_MODULE,
        },
        .ids            = ur_ids,
        .probe          = ur_probe,
        .remove         = ur_remove,
        .set_online     = ur_set_online,
        .set_offline    = ur_set_offline,
        .int_class      = IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   (dev_get_drvdata(&cdev->dev))
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
        struct urdev *urd;

        urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
        if (!urd)
                return NULL;
        urd->reclen = cdev->id.driver_info;
        ccw_device_get_id(cdev, &urd->dev_id);
        mutex_init(&urd->io_mutex);
        init_waitqueue_head(&urd->wait);
        spin_lock_init(&urd->open_lock);
        refcount_set(&urd->ref_count, 1);
        urd->cdev = cdev;
        get_device(&cdev->dev);
        return urd;
}

static void urdev_free(struct urdev *urd)
{
        TRACE("urdev_free: %p\n", urd);
        if (urd->cdev)
                put_device(&urd->cdev->dev);
        kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
        refcount_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
        struct urdev *urd;
        unsigned long flags;

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        urd = dev_get_drvdata(&cdev->dev);
        if (urd)
                urdev_get(urd);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
        char bus_id[16];
        struct ccw_device *cdev;
        struct urdev *urd;

        sprintf(bus_id, "0.0.%04x", devno);
        cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
        if (!cdev)
                return NULL;
        urd = urdev_get_from_cdev(cdev);
        put_device(&cdev->dev);
        return urd;
}

static void urdev_put(struct urdev *urd)
{
        if (refcount_dec_and_test(&urd->ref_count))
                urdev_free(urd);
}
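
/*
 * Reference-count walk-through (illustrative, derived from the comment
 * above): after ur_probe the count is 1 (the reference published via
 * dev_set_drvdata). Each ur_open adds one, helpers calling
 * urdev_get_from_cdev temporarily add another, and ur_remove drops the
 * initial one, so urdev_free runs only once the last urdev_put is done.
 */
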
/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
        struct ccw1 *ptr = cpa;

        while (ptr->cda) {
                kfree((void *)(addr_t) ptr->cda);
                ptr++;
        }
        kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use consists of write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
                                    int reclen)
{
        struct ccw1 *cpa;
        void *kbuf;
        int i;

        TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

        /*
         * We chain a NOP onto the writes to force CE+DE together.
         * That means we allocate room for CCWs to cover count/reclen
         * records plus a NOP.
         */
        cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
                      GFP_KERNEL | GFP_DMA);
        if (!cpa)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < rec_count; i++) {
                cpa[i].cmd_code = WRITE_CCW_CMD;
                cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
                cpa[i].count = reclen;
                kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
                if (!kbuf) {
                        free_chan_prog(cpa);
                        return ERR_PTR(-ENOMEM);
                }
                cpa[i].cda = (u32)(addr_t) kbuf;
                if (copy_from_user(kbuf, ubuf, reclen)) {
                        free_chan_prog(cpa);
                        return ERR_PTR(-EFAULT);
                }
                ubuf += reclen;
        }
        /* The following NOP CCW forces CE+DE to be presented together */
        cpa[i].cmd_code = CCW_CMD_NOOP;
        return cpa;
}
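
/*
 * Shape of the channel program built by alloc_chan_prog for
 * rec_count == 3 (illustrative):
 *
 *	cpa[0]  WRITE_CCW_CMD  CC|SLI  count=reclen  cda -> kbuf 0
 *	cpa[1]  WRITE_CCW_CMD  CC|SLI  count=reclen  cda -> kbuf 1
 *	cpa[2]  WRITE_CCW_CMD  CC|SLI  count=reclen  cda -> kbuf 2
 *	cpa[3]  CCW_CMD_NOOP   (forces CE and DE into one interrupt)
 *
 * Note that free_chan_prog relies on the NOP's zero cda (from kcalloc)
 * to terminate its loop over the data buffers.
 */
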
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
        int rc;
        struct ccw_device *cdev = urd->cdev;
        DECLARE_COMPLETION_ONSTACK(event);

        TRACE("do_ur_io: cpa=%p\n", cpa);

        rc = mutex_lock_interruptible(&urd->io_mutex);
        if (rc)
                return rc;

        urd->io_done = &event;

        spin_lock_irq(get_ccwdev_lock(cdev));
        rc = ccw_device_start(cdev, cpa, 1, 0, 0);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
        if (rc)
                goto out;

        wait_for_completion(&event);
        TRACE("do_ur_io: I/O complete\n");
        rc = 0;

out:
        mutex_unlock(&urd->io_mutex);
        return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
                           struct irb *irb)
{
        struct urdev *urd;

        if (!IS_ERR(irb)) {
                TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
                      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
                      irb->scsw.cmd.count);
        }
        if (!intparm) {
                TRACE("ur_int_handler: unsolicited interrupt\n");
                return;
        }
        urd = dev_get_drvdata(&cdev->dev);
        BUG_ON(!urd);
        /* On special conditions irb is an error pointer */
        if (IS_ERR(irb))
                urd->io_request_rc = PTR_ERR(irb);
        else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                urd->io_request_rc = 0;
        else
                urd->io_request_rc = -EIO;

        complete(urd->io_done);
}
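
/*
 * Synchronous I/O handshake between do_ur_io and ur_int_handler
 * (illustrative sequence for a clean completion):
 *
 *	do_ur_io                        ur_int_handler
 *	  ccw_device_start(cdev, cpa)
 *	  wait_for_completion(&event)
 *	                                  dstat == CHN_END | DEV_END
 *	                                  urd->io_request_rc = 0
 *	                                  complete(urd->io_done)
 *	  <woken up, returns 0>
 *
 * The caller then inspects urd->io_request_rc for the device status,
 * as do_write does below.
 */
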
/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct urdev *urd;
        int rc;

        urd = urdev_get_from_cdev(to_ccwdev(dev));
        if (!urd)
                return -ENODEV;
        rc = sprintf(buf, "%zu\n", urd->reclen);
        urdev_put(urd);
        return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
        return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
        device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  the virtual device exists, but is not associated with a real device
 * cc=3  invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
        static struct diag210 ur_diag210;
        int cc;

        ur_diag210.vrdcdvno = urd->dev_id.devno;
        ur_diag210.vrdclen = sizeof(struct diag210);

        cc = diag210(&ur_diag210);
        switch (cc) {
        case 0:
                return -EOPNOTSUPP;
        case 2:
                return ur_diag210.vrdcvcla; /* virtual device class */
        case 3:
                return -ENODEV;
        default:
                return -EIO;
        }
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
        struct urfile *urf;

        urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
        if (!urf)
                return NULL;
        urf->urd = urd;

        TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
              urf->dev_reclen);

        return urf;
}

static void urfile_free(struct urfile *urf)
{
        TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
        kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
                        size_t count, size_t reclen, loff_t *ppos)
{
        struct ccw1 *cpa;
        int rc;

        cpa = alloc_chan_prog(udata, count / reclen, reclen);
        if (IS_ERR(cpa))
                return PTR_ERR(cpa);

        rc = do_ur_io(urd, cpa);
        if (rc)
                goto fail_kfree_cpa;

        if (urd->io_request_rc) {
                rc = urd->io_request_rc;
                goto fail_kfree_cpa;
        }
        *ppos += count;
        rc = count;

fail_kfree_cpa:
        free_chan_prog(cpa);
        return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
                        size_t count, loff_t *ppos)
{
        struct urfile *urf = file->private_data;

        TRACE("ur_write: count=%zu\n", count);

        if (count == 0)
                return 0;

        if (count % urf->dev_reclen)
                return -EINVAL; /* count must be a multiple of reclen */

        if (count > urf->dev_reclen * MAX_RECS_PER_IO)
                count = urf->dev_reclen * MAX_RECS_PER_IO;

        return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *                                     record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
        int cc;

        cc = diag14(record, devno, 0x28);
        switch (cc) {
        case 0:
                return 0;
        case 2:
                return -ENOMEDIUM;
        case 3:
                return -ENODATA; /* position beyond end of file */
        default:
                return -EIO;
        }
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *       reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
        int cc;

        cc = diag14((unsigned long) buf, devno, 0x00);
        switch (cc) {
        case 0:
                return 0;
        case 1:
                return -ENODATA;
        case 2:
                return -ENOMEDIUM;
        default:
                return -EIO;
        }
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
                           loff_t *offs)
{
        size_t len, copied, res;
        char *buf;
        int rc;
        u16 reclen;
        struct urdev *urd;

        urd = ((struct urfile *) file->private_data)->urd;
        reclen = ((struct urfile *) file->private_data)->file_reclen;

        rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
        if (rc == -ENODATA)
                return 0;
        if (rc)
                return rc;

        len = min((size_t) PAGE_SIZE, count);
        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        copied = 0;
        res = (size_t) (*offs % PAGE_SIZE);
        do {
                rc = diag_read_file(urd->dev_id.devno, buf);
                if (rc == -ENODATA)
                        break;
                if (rc)
                        goto fail;
                if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
                        *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
                len = min(count - copied, PAGE_SIZE - res);
                if (copy_to_user(ubuf + copied, buf + res, len)) {
                        rc = -EFAULT;
                        goto fail;
                }
                res = 0;
                copied += len;
        } while (copied != count);

        *offs += copied;
        rc = copied;
fail:
        free_page((unsigned long) buf);
        return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
                       loff_t *offs)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

        if (count == 0)
                return 0;

        urd = ((struct urfile *) file->private_data)->urd;
        rc = mutex_lock_interruptible(&urd->io_mutex);
        if (rc)
                return rc;
        rc = diag14_read(file, ubuf, count, offs);
        mutex_unlock(&urd->io_mutex);
        return rc;
}
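
/*
 * Worked example for the positioning math in diag14_read (illustrative
 * numbers): a read at *offs == 9000 with PAGE_SIZE == 4096 positions the
 * spool file to record 9000 / 4096 + 1 == 3 and then skips the first
 * res == 9000 % 4096 == 808 bytes of the page returned by diag 0x14, so
 * userspace sees a contiguous byte stream despite the page-wise interface.
 */
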
/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
        int cc;

        cc = diag14((unsigned long) buf, spid, 0xfff);
        switch (cc) {
        case 0:
                return 0;
        default:
                return -ENODATA;
        }
}

static int verify_uri_device(struct urdev *urd)
{
        struct file_control_block *fcb;
        char *buf;
        int rc;

        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
        if (!fcb)
                return -ENOMEM;

        /* check for empty reader device (beginning of chain) */
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free_fcb;

        /* if file is in hold status, we do not read it */
        if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
                rc = -EPERM;
                goto fail_free_fcb;
        }

        /* open file on virtual reader */
        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!buf) {
                rc = -ENOMEM;
                goto fail_free_fcb;
        }
        rc = diag_read_file(urd->dev_id.devno, buf);
        if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
                goto fail_free_buf;

        /* check if the file on top of the queue is open now */
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free_buf;
        if (!(fcb->file_stat & FLG_IN_USE)) {
                rc = -EMFILE;
                goto fail_free_buf;
        }
        rc = 0;

fail_free_buf:
        free_page((unsigned long) buf);
fail_free_fcb:
        kfree(fcb);
        return rc;
}

static int verify_device(struct urdev *urd)
{
        switch (urd->class) {
        case DEV_CLASS_UR_O:
                return 0; /* no check needed here */
        case DEV_CLASS_UR_I:
                return verify_uri_device(urd);
        default:
                return -EOPNOTSUPP;
        }
}

static int get_uri_file_reclen(struct urdev *urd)
{
        struct file_control_block *fcb;
        int rc;

        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
        if (!fcb)
                return -ENOMEM;
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free;
        if (fcb->file_stat & FLG_CP_DUMP)
                rc = 0;
        else
                rc = fcb->rec_len;

fail_free:
        kfree(fcb);
        return rc;
}

static int get_file_reclen(struct urdev *urd)
{
        switch (urd->class) {
        case DEV_CLASS_UR_O:
                return 0;
        case DEV_CLASS_UR_I:
                return get_uri_file_reclen(urd);
        default:
                return -EOPNOTSUPP;
        }
}

static int ur_open(struct inode *inode, struct file *file)
{
        u16 devno;
        struct urdev *urd;
        struct urfile *urf;
        unsigned short accmode;
        int rc;

        accmode = file->f_flags & O_ACCMODE;

        if (accmode == O_RDWR)
                return -EACCES;
        /*
         * We treat the minor number as the devno of the ur device
         * to find in the driver tree.
         */
        devno = MINOR(file_inode(file)->i_rdev);

        urd = urdev_get_from_devno(devno);
        if (!urd) {
                rc = -ENXIO;
                goto out;
        }

        spin_lock(&urd->open_lock);
        while (urd->open_flag) {
                spin_unlock(&urd->open_lock);
                if (file->f_flags & O_NONBLOCK) {
                        rc = -EBUSY;
                        goto fail_put;
                }
                if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
                        rc = -ERESTARTSYS;
                        goto fail_put;
                }
                spin_lock(&urd->open_lock);
        }
        urd->open_flag++;
        spin_unlock(&urd->open_lock);

        TRACE("ur_open\n");

        if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
            ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
                TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
                rc = -EACCES;
                goto fail_unlock;
        }

        rc = verify_device(urd);
        if (rc)
                goto fail_unlock;

        urf = urfile_alloc(urd);
        if (!urf) {
                rc = -ENOMEM;
                goto fail_unlock;
        }

        urf->dev_reclen = urd->reclen;
        rc = get_file_reclen(urd);
        if (rc < 0)
                goto fail_urfile_free;
        urf->file_reclen = rc;
        file->private_data = urf;
        return 0;

fail_urfile_free:
        urfile_free(urf);
fail_unlock:
        spin_lock(&urd->open_lock);
        urd->open_flag--;
        spin_unlock(&urd->open_lock);
fail_put:
        urdev_put(urd);
out:
        return rc;
}
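
/*
 * Access-mode policy enforced in ur_open above, as a table (illustrative
 * summary of the checks, not additional logic):
 *
 *	O_RDONLY    only DEV_CLASS_UR_I (virtual reader)
 *	O_WRONLY    only DEV_CLASS_UR_O (virtual punch/printer)
 *	O_RDWR      always rejected with -EACCES
 */
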
static int ur_release(struct inode *inode, struct file *file)
{
        struct urfile *urf = file->private_data;

        TRACE("ur_release\n");
        spin_lock(&urf->urd->open_lock);
        urf->urd->open_flag--;
        spin_unlock(&urf->urd->open_lock);
        wake_up_interruptible(&urf->urd->wait);
        urdev_put(urf->urd);
        urfile_free(urf);
        return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
        if ((file->f_flags & O_ACCMODE) != O_RDONLY)
                return -ESPIPE; /* seek allowed only for reader */
        if (offset % PAGE_SIZE)
                return -ESPIPE; /* only multiples of 4K allowed */
        return no_seek_end_llseek(file, offset, whence);
}

static const struct file_operations ur_fops = {
        .owner   = THIS_MODULE,
        .open    = ur_open,
        .release = ur_release,
        .read    = ur_read,
        .write   = ur_write,
        .llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 * ur_probe creates the struct urdev (with refcount = 1), the device
 * attributes, sets up the interrupt handler and validates the virtual
 * unit record device.
 * ur_remove removes the device attributes and drops the reference to
 * struct urdev.
 *
 * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 * by the vmur_mutex lock.
 *
 * urd->char_device is used as indication that the online function has
 * been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_probe: cdev=%p\n", cdev);

        mutex_lock(&vmur_mutex);
        urd = urdev_alloc(cdev);
        if (!urd) {
                rc = -ENOMEM;
                goto fail_unlock;
        }

        rc = ur_create_attributes(&cdev->dev);
        if (rc) {
                rc = -ENOMEM;
                goto fail_urdev_put;
        }
        cdev->handler = ur_int_handler;

        /* validate virtual unit record device */
        urd->class = get_urd_class(urd);
        if (urd->class < 0) {
                rc = urd->class;
                goto fail_remove_attr;
        }
        if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
                rc = -EOPNOTSUPP;
                goto fail_remove_attr;
        }
        spin_lock_irq(get_ccwdev_lock(cdev));
        dev_set_drvdata(&cdev->dev, urd);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        mutex_unlock(&vmur_mutex);
        return 0;

fail_remove_attr:
        ur_remove_attributes(&cdev->dev);
fail_urdev_put:
        urdev_put(urd);
fail_unlock:
        mutex_unlock(&vmur_mutex);
        return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
        struct urdev *urd;
        int minor, major, rc;
        char node_id[16];

        TRACE("ur_set_online: cdev=%p\n", cdev);

        mutex_lock(&vmur_mutex);
        urd = urdev_get_from_cdev(cdev);
        if (!urd) {
                /* ur_remove already deleted our urd */
                rc = -ENODEV;
                goto fail_unlock;
        }

        if (urd->char_device) {
                /* Another ur_set_online was faster */
                rc = -EBUSY;
                goto fail_urdev_put;
        }

        minor = urd->dev_id.devno;
        major = MAJOR(ur_first_dev_maj_min);

        urd->char_device = cdev_alloc();
        if (!urd->char_device) {
                rc = -ENOMEM;
                goto fail_urdev_put;
        }

        urd->char_device->ops = &ur_fops;
        urd->char_device->owner = ur_fops.owner;

        rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
        if (rc)
                goto fail_free_cdev;
        if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
                if (urd->class == DEV_CLASS_UR_I)
                        sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
                if (urd->class == DEV_CLASS_UR_O)
                        sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
        } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
                sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
        } else {
                rc = -EOPNOTSUPP;
                goto fail_free_cdev;
        }

        urd->device = device_create(vmur_class, &cdev->dev,
                                    urd->char_device->dev, NULL, "%s", node_id);
        if (IS_ERR(urd->device)) {
                rc = PTR_ERR(urd->device);
                TRACE("ur_set_online: device_create rc=%d\n", rc);
                goto fail_free_cdev;
        }
        urdev_put(urd);
        mutex_unlock(&vmur_mutex);
        return 0;

fail_free_cdev:
        cdev_del(urd->char_device);
        urd->char_device = NULL;
fail_urdev_put:
        urdev_put(urd);
fail_unlock:
        mutex_unlock(&vmur_mutex);
        return rc;
}
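
/*
 * Note on the ref_count test in ur_set_offline_force below (illustrative
 * accounting): one reference is held via dev_get_drvdata since ur_probe
 * and a second is taken by urdev_get_from_cdev on entry, so a count
 * above 2 means some other user (e.g. an ur_open without a matching
 * ur_release yet) still holds the device.
 */
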
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_set_offline: cdev=%p\n", cdev);
        urd = urdev_get_from_cdev(cdev);
        if (!urd)
                /* ur_remove already deleted our urd */
                return -ENODEV;
        if (!urd->char_device) {
                /* Another ur_set_offline was faster */
                rc = -EBUSY;
                goto fail_urdev_put;
        }
        if (!force && (refcount_read(&urd->ref_count) > 2)) {
                /* There is still a user of urd (e.g. ur_open) */
                TRACE("ur_set_offline: BUSY\n");
                rc = -EBUSY;
                goto fail_urdev_put;
        }
        device_destroy(vmur_class, urd->char_device->dev);
        cdev_del(urd->char_device);
        urd->char_device = NULL;
        rc = 0;

fail_urdev_put:
        urdev_put(urd);
        return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
        int rc;

        mutex_lock(&vmur_mutex);
        rc = ur_set_offline_force(cdev, 0);
        mutex_unlock(&vmur_mutex);
        return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
        unsigned long flags;

        TRACE("ur_remove\n");

        mutex_lock(&vmur_mutex);

        if (cdev->online)
                ur_set_offline_force(cdev, 1);
        ur_remove_attributes(&cdev->dev);

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        urdev_put(dev_get_drvdata(&cdev->dev));
        dev_set_drvdata(&cdev->dev, NULL);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
        int rc;
        dev_t dev;

        if (!MACHINE_IS_VM) {
                pr_err("The %s cannot be loaded without z/VM\n",
                       ur_banner);
                return -ENODEV;
        }

        vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
        if (!vmur_dbf)
                return -ENOMEM;
        rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
        if (rc)
                goto fail_free_dbf;

        debug_set_level(vmur_dbf, 6);

        vmur_class = class_create(THIS_MODULE, "vmur");
        if (IS_ERR(vmur_class)) {
                rc = PTR_ERR(vmur_class);
                goto fail_free_dbf;
        }

        rc = ccw_driver_register(&ur_driver);
        if (rc)
                goto fail_class_destroy;

        rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
        if (rc) {
                pr_err("Kernel function alloc_chrdev_region failed with "
                       "error code %d\n", rc);
                goto fail_unregister_driver;
        }
        ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

        pr_info("%s loaded.\n", ur_banner);
        return 0;

fail_unregister_driver:
        ccw_driver_unregister(&ur_driver);
fail_class_destroy:
        class_destroy(vmur_class);
fail_free_dbf:
        debug_unregister(vmur_dbf);
        return rc;
}

static void __exit ur_exit(void)
{
        unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
        ccw_driver_unregister(&ur_driver);
        class_destroy(vmur_class);
        debug_unregister(vmur_dbf);
        pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);
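
/*
 * Illustrative admin session once the module is loaded (the device
 * number is an assumption for the example; chccwdev is part of
 * s390-tools):
 *
 *	# chccwdev -e 0.0.000c                         # set reader online
 *	# cat /sys/bus/ccw/devices/0.0.000c/reclen
 *	80
 *	# dd if=/dev/vmrdr-0.0.000c of=spool.bin bs=4096
 */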