1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Mediated virtual PCI display host device driver 4 * 5 * See mdpy-defs.h for device specs 6 * 7 * (c) Gerd Hoffmann <kraxel@redhat.com> 8 * 9 * based on mtty driver which is: 10 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 11 * Author: Neo Jia <cjia@nvidia.com> 12 * Kirti Wankhede <kwankhede@nvidia.com> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License version 2 as 16 * published by the Free Software Foundation. 17 */ 18 #include <linux/init.h> 19 #include <linux/module.h> 20 #include <linux/kernel.h> 21 #include <linux/slab.h> 22 #include <linux/vmalloc.h> 23 #include <linux/cdev.h> 24 #include <linux/vfio.h> 25 #include <linux/iommu.h> 26 #include <linux/sysfs.h> 27 #include <linux/mdev.h> 28 #include <linux/pci.h> 29 #include <drm/drm_fourcc.h> 30 #include "mdpy-defs.h" 31 32 #define MDPY_NAME "mdpy" 33 #define MDPY_CLASS_NAME "mdpy" 34 35 #define MDPY_CONFIG_SPACE_SIZE 0xff 36 #define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE 37 #define MDPY_DISPLAY_REGION 16 38 39 #define STORE_LE16(addr, val) (*(u16 *)addr = val) 40 #define STORE_LE32(addr, val) (*(u32 *)addr = val) 41 42 43 MODULE_LICENSE("GPL v2"); 44 45 static int max_devices = 4; 46 module_param_named(count, max_devices, int, 0444); 47 MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices"); 48 49 50 #define MDPY_TYPE_1 "vga" 51 #define MDPY_TYPE_2 "xga" 52 #define MDPY_TYPE_3 "hd" 53 54 static struct mdpy_type { 55 struct mdev_type type; 56 u32 format; 57 u32 bytepp; 58 u32 width; 59 u32 height; 60 } mdpy_types[] = { 61 { 62 .type.sysfs_name = MDPY_TYPE_1, 63 .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1, 64 .format = DRM_FORMAT_XRGB8888, 65 .bytepp = 4, 66 .width = 640, 67 .height = 480, 68 }, { 69 .type.sysfs_name = MDPY_TYPE_2, 70 .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2, 71 .format = DRM_FORMAT_XRGB8888, 72 .bytepp = 4, 73 .width = 1024, 74 .height = 768, 75 }, { 76 .type.sysfs_name = MDPY_TYPE_3, 77 .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3, 78 .format = DRM_FORMAT_XRGB8888, 79 .bytepp = 4, 80 .width = 1920, 81 .height = 1080, 82 }, 83 }; 84 85 static struct mdev_type *mdpy_mdev_types[] = { 86 &mdpy_types[0].type, 87 &mdpy_types[1].type, 88 &mdpy_types[2].type, 89 }; 90 91 static dev_t mdpy_devt; 92 static struct class *mdpy_class; 93 static struct cdev mdpy_cdev; 94 static struct device mdpy_dev; 95 static struct mdev_parent mdpy_parent; 96 static u32 mdpy_count; 97 static const struct vfio_device_ops mdpy_dev_ops; 98 99 /* State of each mdev device */ 100 struct mdev_state { 101 struct vfio_device vdev; 102 u8 *vconfig; 103 u32 bar_mask; 104 struct mutex ops_lock; 105 struct mdev_device *mdev; 106 struct vfio_device_info dev_info; 107 108 const struct mdpy_type *type; 109 u32 memsize; 110 void *memblk; 111 }; 112 113 static void mdpy_create_config_space(struct mdev_state *mdev_state) 114 { 115 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID], 116 MDPY_PCI_VENDOR_ID); 117 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID], 118 MDPY_PCI_DEVICE_ID); 119 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID], 120 MDPY_PCI_SUBVENDOR_ID); 121 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID], 122 MDPY_PCI_SUBDEVICE_ID); 123 124 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND], 125 PCI_COMMAND_IO | PCI_COMMAND_MEMORY); 126 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS], 127 PCI_STATUS_CAP_LIST); 128 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE], 129 PCI_CLASS_DISPLAY_OTHER); 130 mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01; 131 132 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0], 133 PCI_BASE_ADDRESS_SPACE_MEMORY | 134 PCI_BASE_ADDRESS_MEM_TYPE_32 | 135 PCI_BASE_ADDRESS_MEM_PREFETCH); 136 mdev_state->bar_mask = ~(mdev_state->memsize) + 1; 137 138 /* vendor specific capability for the config registers */ 139 mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET; 140 mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */ 141 mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */ 142 mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE; 143 STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET], 144 mdev_state->type->format); 145 STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET], 146 mdev_state->type->width); 147 STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET], 148 mdev_state->type->height); 149 } 150 151 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, 152 char *buf, u32 count) 153 { 154 struct device *dev = mdev_dev(mdev_state->mdev); 155 u32 cfg_addr; 156 157 switch (offset) { 158 case PCI_BASE_ADDRESS_0: 159 cfg_addr = *(u32 *)buf; 160 161 if (cfg_addr == 0xffffffff) { 162 cfg_addr = (cfg_addr & mdev_state->bar_mask); 163 } else { 164 cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK; 165 if (cfg_addr) 166 dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr); 167 } 168 169 cfg_addr |= (mdev_state->vconfig[offset] & 170 ~PCI_BASE_ADDRESS_MEM_MASK); 171 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); 172 break; 173 } 174 } 175 176 static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf, 177 size_t count, loff_t pos, bool is_write) 178 { 179 int ret = 0; 180 181 mutex_lock(&mdev_state->ops_lock); 182 183 if (pos < MDPY_CONFIG_SPACE_SIZE) { 184 if (is_write) 185 handle_pci_cfg_write(mdev_state, pos, buf, count); 186 else 187 memcpy(buf, (mdev_state->vconfig + pos), count); 188 189 } else if ((pos >= MDPY_MEMORY_BAR_OFFSET) && 190 (pos + count <= 191 MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) { 192 pos -= MDPY_MEMORY_BAR_OFFSET; 193 if (is_write) 194 memcpy(mdev_state->memblk, buf, count); 195 else 196 memcpy(buf, mdev_state->memblk, count); 197 198 } else { 199 dev_info(mdev_state->vdev.dev, 200 "%s: %s @0x%llx (unhandled)\n", __func__, 201 is_write ? "WR" : "RD", pos); 202 ret = -1; 203 goto accessfailed; 204 } 205 206 ret = count; 207 208 209 accessfailed: 210 mutex_unlock(&mdev_state->ops_lock); 211 212 return ret; 213 } 214 215 static int mdpy_reset(struct mdev_state *mdev_state) 216 { 217 u32 stride, i; 218 219 /* initialize with gray gradient */ 220 stride = mdev_state->type->width * mdev_state->type->bytepp; 221 for (i = 0; i < mdev_state->type->height; i++) 222 memset(mdev_state->memblk + i * stride, 223 i * 255 / mdev_state->type->height, 224 stride); 225 return 0; 226 } 227 228 static int mdpy_init_dev(struct vfio_device *vdev) 229 { 230 struct mdev_state *mdev_state = 231 container_of(vdev, struct mdev_state, vdev); 232 struct mdev_device *mdev = to_mdev_device(vdev->dev); 233 const struct mdpy_type *type = 234 container_of(mdev->type, struct mdpy_type, type); 235 u32 fbsize; 236 int ret = -ENOMEM; 237 238 if (mdpy_count >= max_devices) 239 return ret; 240 241 mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL); 242 if (!mdev_state->vconfig) 243 return ret; 244 245 fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp); 246 247 mdev_state->memblk = vmalloc_user(fbsize); 248 if (!mdev_state->memblk) 249 goto out_vconfig; 250 251 mutex_init(&mdev_state->ops_lock); 252 mdev_state->mdev = mdev; 253 mdev_state->type = type; 254 mdev_state->memsize = fbsize; 255 mdpy_create_config_space(mdev_state); 256 mdpy_reset(mdev_state); 257 258 dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name, 259 type->width, type->height); 260 261 mdpy_count++; 262 return 0; 263 264 out_vconfig: 265 kfree(mdev_state->vconfig); 266 return ret; 267 } 268 269 static int mdpy_probe(struct mdev_device *mdev) 270 { 271 struct mdev_state *mdev_state; 272 int ret; 273 274 mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, 275 &mdpy_dev_ops); 276 if (IS_ERR(mdev_state)) 277 return PTR_ERR(mdev_state); 278 279 ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); 280 if (ret) 281 goto err_put_vdev; 282 dev_set_drvdata(&mdev->dev, mdev_state); 283 return 0; 284 285 err_put_vdev: 286 vfio_put_device(&mdev_state->vdev); 287 return ret; 288 } 289 290 static void mdpy_release_dev(struct vfio_device *vdev) 291 { 292 struct mdev_state *mdev_state = 293 container_of(vdev, struct mdev_state, vdev); 294 295 mdpy_count--; 296 vfree(mdev_state->memblk); 297 kfree(mdev_state->vconfig); 298 vfio_free_device(vdev); 299 } 300 301 static void mdpy_remove(struct mdev_device *mdev) 302 { 303 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); 304 305 dev_info(&mdev->dev, "%s\n", __func__); 306 307 vfio_unregister_group_dev(&mdev_state->vdev); 308 vfio_put_device(&mdev_state->vdev); 309 } 310 311 static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf, 312 size_t count, loff_t *ppos) 313 { 314 struct mdev_state *mdev_state = 315 container_of(vdev, struct mdev_state, vdev); 316 unsigned int done = 0; 317 int ret; 318 319 while (count) { 320 size_t filled; 321 322 if (count >= 4 && !(*ppos % 4)) { 323 u32 val; 324 325 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), 326 *ppos, false); 327 if (ret <= 0) 328 goto read_err; 329 330 if (copy_to_user(buf, &val, sizeof(val))) 331 goto read_err; 332 333 filled = 4; 334 } else if (count >= 2 && !(*ppos % 2)) { 335 u16 val; 336 337 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), 338 *ppos, false); 339 if (ret <= 0) 340 goto read_err; 341 342 if (copy_to_user(buf, &val, sizeof(val))) 343 goto read_err; 344 345 filled = 2; 346 } else { 347 u8 val; 348 349 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), 350 *ppos, false); 351 if (ret <= 0) 352 goto read_err; 353 354 if (copy_to_user(buf, &val, sizeof(val))) 355 goto read_err; 356 357 filled = 1; 358 } 359 360 count -= filled; 361 done += filled; 362 *ppos += filled; 363 buf += filled; 364 } 365 366 return done; 367 368 read_err: 369 return -EFAULT; 370 } 371 372 static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf, 373 size_t count, loff_t *ppos) 374 { 375 struct mdev_state *mdev_state = 376 container_of(vdev, struct mdev_state, vdev); 377 unsigned int done = 0; 378 int ret; 379 380 while (count) { 381 size_t filled; 382 383 if (count >= 4 && !(*ppos % 4)) { 384 u32 val; 385 386 if (copy_from_user(&val, buf, sizeof(val))) 387 goto write_err; 388 389 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), 390 *ppos, true); 391 if (ret <= 0) 392 goto write_err; 393 394 filled = 4; 395 } else if (count >= 2 && !(*ppos % 2)) { 396 u16 val; 397 398 if (copy_from_user(&val, buf, sizeof(val))) 399 goto write_err; 400 401 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), 402 *ppos, true); 403 if (ret <= 0) 404 goto write_err; 405 406 filled = 2; 407 } else { 408 u8 val; 409 410 if (copy_from_user(&val, buf, sizeof(val))) 411 goto write_err; 412 413 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), 414 *ppos, true); 415 if (ret <= 0) 416 goto write_err; 417 418 filled = 1; 419 } 420 count -= filled; 421 done += filled; 422 *ppos += filled; 423 buf += filled; 424 } 425 426 return done; 427 write_err: 428 return -EFAULT; 429 } 430 431 static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma) 432 { 433 struct mdev_state *mdev_state = 434 container_of(vdev, struct mdev_state, vdev); 435 436 if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT) 437 return -EINVAL; 438 if (vma->vm_end < vma->vm_start) 439 return -EINVAL; 440 if (vma->vm_end - vma->vm_start > mdev_state->memsize) 441 return -EINVAL; 442 if ((vma->vm_flags & VM_SHARED) == 0) 443 return -EINVAL; 444 445 return remap_vmalloc_range(vma, mdev_state->memblk, 0); 446 } 447 448 static int mdpy_get_region_info(struct mdev_state *mdev_state, 449 struct vfio_region_info *region_info, 450 u16 *cap_type_id, void **cap_type) 451 { 452 if (region_info->index >= VFIO_PCI_NUM_REGIONS && 453 region_info->index != MDPY_DISPLAY_REGION) 454 return -EINVAL; 455 456 switch (region_info->index) { 457 case VFIO_PCI_CONFIG_REGION_INDEX: 458 region_info->offset = 0; 459 region_info->size = MDPY_CONFIG_SPACE_SIZE; 460 region_info->flags = (VFIO_REGION_INFO_FLAG_READ | 461 VFIO_REGION_INFO_FLAG_WRITE); 462 break; 463 case VFIO_PCI_BAR0_REGION_INDEX: 464 case MDPY_DISPLAY_REGION: 465 region_info->offset = MDPY_MEMORY_BAR_OFFSET; 466 region_info->size = mdev_state->memsize; 467 region_info->flags = (VFIO_REGION_INFO_FLAG_READ | 468 VFIO_REGION_INFO_FLAG_WRITE | 469 VFIO_REGION_INFO_FLAG_MMAP); 470 break; 471 default: 472 region_info->size = 0; 473 region_info->offset = 0; 474 region_info->flags = 0; 475 } 476 477 return 0; 478 } 479 480 static int mdpy_get_irq_info(struct vfio_irq_info *irq_info) 481 { 482 irq_info->count = 0; 483 return 0; 484 } 485 486 static int mdpy_get_device_info(struct vfio_device_info *dev_info) 487 { 488 dev_info->flags = VFIO_DEVICE_FLAGS_PCI; 489 dev_info->num_regions = VFIO_PCI_NUM_REGIONS; 490 dev_info->num_irqs = VFIO_PCI_NUM_IRQS; 491 return 0; 492 } 493 494 static int mdpy_query_gfx_plane(struct mdev_state *mdev_state, 495 struct vfio_device_gfx_plane_info *plane) 496 { 497 if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) { 498 if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE | 499 VFIO_GFX_PLANE_TYPE_REGION)) 500 return 0; 501 return -EINVAL; 502 } 503 504 if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION) 505 return -EINVAL; 506 507 plane->drm_format = mdev_state->type->format; 508 plane->width = mdev_state->type->width; 509 plane->height = mdev_state->type->height; 510 plane->stride = (mdev_state->type->width * 511 mdev_state->type->bytepp); 512 plane->size = mdev_state->memsize; 513 plane->region_index = MDPY_DISPLAY_REGION; 514 515 /* unused */ 516 plane->drm_format_mod = 0; 517 plane->x_pos = 0; 518 plane->y_pos = 0; 519 plane->x_hot = 0; 520 plane->y_hot = 0; 521 522 return 0; 523 } 524 525 static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd, 526 unsigned long arg) 527 { 528 int ret = 0; 529 unsigned long minsz; 530 struct mdev_state *mdev_state = 531 container_of(vdev, struct mdev_state, vdev); 532 533 switch (cmd) { 534 case VFIO_DEVICE_GET_INFO: 535 { 536 struct vfio_device_info info; 537 538 minsz = offsetofend(struct vfio_device_info, num_irqs); 539 540 if (copy_from_user(&info, (void __user *)arg, minsz)) 541 return -EFAULT; 542 543 if (info.argsz < minsz) 544 return -EINVAL; 545 546 ret = mdpy_get_device_info(&info); 547 if (ret) 548 return ret; 549 550 memcpy(&mdev_state->dev_info, &info, sizeof(info)); 551 552 if (copy_to_user((void __user *)arg, &info, minsz)) 553 return -EFAULT; 554 555 return 0; 556 } 557 case VFIO_DEVICE_GET_REGION_INFO: 558 { 559 struct vfio_region_info info; 560 u16 cap_type_id = 0; 561 void *cap_type = NULL; 562 563 minsz = offsetofend(struct vfio_region_info, offset); 564 565 if (copy_from_user(&info, (void __user *)arg, minsz)) 566 return -EFAULT; 567 568 if (info.argsz < minsz) 569 return -EINVAL; 570 571 ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id, 572 &cap_type); 573 if (ret) 574 return ret; 575 576 if (copy_to_user((void __user *)arg, &info, minsz)) 577 return -EFAULT; 578 579 return 0; 580 } 581 582 case VFIO_DEVICE_GET_IRQ_INFO: 583 { 584 struct vfio_irq_info info; 585 586 minsz = offsetofend(struct vfio_irq_info, count); 587 588 if (copy_from_user(&info, (void __user *)arg, minsz)) 589 return -EFAULT; 590 591 if ((info.argsz < minsz) || 592 (info.index >= mdev_state->dev_info.num_irqs)) 593 return -EINVAL; 594 595 ret = mdpy_get_irq_info(&info); 596 if (ret) 597 return ret; 598 599 if (copy_to_user((void __user *)arg, &info, minsz)) 600 return -EFAULT; 601 602 return 0; 603 } 604 605 case VFIO_DEVICE_QUERY_GFX_PLANE: 606 { 607 struct vfio_device_gfx_plane_info plane; 608 609 minsz = offsetofend(struct vfio_device_gfx_plane_info, 610 region_index); 611 612 if (copy_from_user(&plane, (void __user *)arg, minsz)) 613 return -EFAULT; 614 615 if (plane.argsz < minsz) 616 return -EINVAL; 617 618 ret = mdpy_query_gfx_plane(mdev_state, &plane); 619 if (ret) 620 return ret; 621 622 if (copy_to_user((void __user *)arg, &plane, minsz)) 623 return -EFAULT; 624 625 return 0; 626 } 627 628 case VFIO_DEVICE_SET_IRQS: 629 return -EINVAL; 630 631 case VFIO_DEVICE_RESET: 632 return mdpy_reset(mdev_state); 633 } 634 return -ENOTTY; 635 } 636 637 static ssize_t 638 resolution_show(struct device *dev, struct device_attribute *attr, 639 char *buf) 640 { 641 struct mdev_state *mdev_state = dev_get_drvdata(dev); 642 643 return sprintf(buf, "%dx%d\n", 644 mdev_state->type->width, 645 mdev_state->type->height); 646 } 647 static DEVICE_ATTR_RO(resolution); 648 649 static struct attribute *mdev_dev_attrs[] = { 650 &dev_attr_resolution.attr, 651 NULL, 652 }; 653 654 static const struct attribute_group mdev_dev_group = { 655 .name = "vendor", 656 .attrs = mdev_dev_attrs, 657 }; 658 659 static const struct attribute_group *mdev_dev_groups[] = { 660 &mdev_dev_group, 661 NULL, 662 }; 663 664 static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf) 665 { 666 struct mdpy_type *type = container_of(mtype, struct mdpy_type, type); 667 668 return sprintf(buf, "virtual display, %dx%d framebuffer\n", 669 type->width, type->height); 670 } 671 672 static unsigned int mdpy_get_available(struct mdev_type *mtype) 673 { 674 return max_devices - mdpy_count; 675 } 676 677 static const struct vfio_device_ops mdpy_dev_ops = { 678 .init = mdpy_init_dev, 679 .release = mdpy_release_dev, 680 .read = mdpy_read, 681 .write = mdpy_write, 682 .ioctl = mdpy_ioctl, 683 .mmap = mdpy_mmap, 684 }; 685 686 static struct mdev_driver mdpy_driver = { 687 .device_api = VFIO_DEVICE_API_PCI_STRING, 688 .driver = { 689 .name = "mdpy", 690 .owner = THIS_MODULE, 691 .mod_name = KBUILD_MODNAME, 692 .dev_groups = mdev_dev_groups, 693 }, 694 .probe = mdpy_probe, 695 .remove = mdpy_remove, 696 .get_available = mdpy_get_available, 697 .show_description = mdpy_show_description, 698 }; 699 700 static const struct file_operations vd_fops = { 701 .owner = THIS_MODULE, 702 }; 703 704 static void mdpy_device_release(struct device *dev) 705 { 706 /* nothing */ 707 } 708 709 static int __init mdpy_dev_init(void) 710 { 711 int ret = 0; 712 713 ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME); 714 if (ret < 0) { 715 pr_err("Error: failed to register mdpy_dev, err: %d\n", ret); 716 return ret; 717 } 718 cdev_init(&mdpy_cdev, &vd_fops); 719 cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1); 720 pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt)); 721 722 ret = mdev_register_driver(&mdpy_driver); 723 if (ret) 724 goto err_cdev; 725 726 mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME); 727 if (IS_ERR(mdpy_class)) { 728 pr_err("Error: failed to register mdpy_dev class\n"); 729 ret = PTR_ERR(mdpy_class); 730 goto err_driver; 731 } 732 mdpy_dev.class = mdpy_class; 733 mdpy_dev.release = mdpy_device_release; 734 dev_set_name(&mdpy_dev, "%s", MDPY_NAME); 735 736 ret = device_register(&mdpy_dev); 737 if (ret) 738 goto err_class; 739 740 ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver, 741 mdpy_mdev_types, 742 ARRAY_SIZE(mdpy_mdev_types)); 743 if (ret) 744 goto err_device; 745 746 return 0; 747 748 err_device: 749 device_unregister(&mdpy_dev); 750 err_class: 751 class_destroy(mdpy_class); 752 err_driver: 753 mdev_unregister_driver(&mdpy_driver); 754 err_cdev: 755 cdev_del(&mdpy_cdev); 756 unregister_chrdev_region(mdpy_devt, MINORMASK + 1); 757 return ret; 758 } 759 760 static void __exit mdpy_dev_exit(void) 761 { 762 mdpy_dev.bus = NULL; 763 mdev_unregister_parent(&mdpy_parent); 764 765 device_unregister(&mdpy_dev); 766 mdev_unregister_driver(&mdpy_driver); 767 cdev_del(&mdpy_cdev); 768 unregister_chrdev_region(mdpy_devt, MINORMASK + 1); 769 class_destroy(mdpy_class); 770 mdpy_class = NULL; 771 } 772 773 module_init(mdpy_dev_init) 774 module_exit(mdpy_dev_exit) 775