1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Mediated virtual PCI display host device driver 4 * 5 * See mdpy-defs.h for device specs 6 * 7 * (c) Gerd Hoffmann <kraxel@redhat.com> 8 * 9 * based on mtty driver which is: 10 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 11 * Author: Neo Jia <cjia@nvidia.com> 12 * Kirti Wankhede <kwankhede@nvidia.com> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License version 2 as 16 * published by the Free Software Foundation. 17 */ 18 #include <linux/init.h> 19 #include <linux/module.h> 20 #include <linux/device.h> 21 #include <linux/kernel.h> 22 #include <linux/slab.h> 23 #include <linux/vmalloc.h> 24 #include <linux/cdev.h> 25 #include <linux/vfio.h> 26 #include <linux/iommu.h> 27 #include <linux/sysfs.h> 28 #include <linux/mdev.h> 29 #include <linux/pci.h> 30 #include <drm/drm_fourcc.h> 31 #include "mdpy-defs.h" 32 33 #define MDPY_NAME "mdpy" 34 #define MDPY_CLASS_NAME "mdpy" 35 36 #define MDPY_CONFIG_SPACE_SIZE 0xff 37 #define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE 38 #define MDPY_DISPLAY_REGION 16 39 40 #define STORE_LE16(addr, val) (*(u16 *)addr = val) 41 #define STORE_LE32(addr, val) (*(u32 *)addr = val) 42 43 44 MODULE_LICENSE("GPL v2"); 45 46 static int max_devices = 4; 47 module_param_named(count, max_devices, int, 0444); 48 MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices"); 49 50 51 #define MDPY_TYPE_1 "vga" 52 #define MDPY_TYPE_2 "xga" 53 #define MDPY_TYPE_3 "hd" 54 55 static const struct mdpy_type { 56 const char *name; 57 u32 format; 58 u32 bytepp; 59 u32 width; 60 u32 height; 61 } mdpy_types[] = { 62 { 63 .name = MDPY_CLASS_NAME "-" MDPY_TYPE_1, 64 .format = DRM_FORMAT_XRGB8888, 65 .bytepp = 4, 66 .width = 640, 67 .height = 480, 68 }, { 69 .name = MDPY_CLASS_NAME "-" MDPY_TYPE_2, 70 .format = DRM_FORMAT_XRGB8888, 71 .bytepp = 4, 72 .width = 1024, 73 .height = 768, 74 }, { 75 .name = MDPY_CLASS_NAME "-" MDPY_TYPE_3, 76 .format = DRM_FORMAT_XRGB8888, 77 .bytepp = 4, 78 .width = 1920, 79 .height = 1080, 80 }, 81 }; 82 83 static dev_t mdpy_devt; 84 static struct class *mdpy_class; 85 static struct cdev mdpy_cdev; 86 static struct device mdpy_dev; 87 static u32 mdpy_count; 88 89 /* State of each mdev device */ 90 struct mdev_state { 91 u8 *vconfig; 92 u32 bar_mask; 93 struct mutex ops_lock; 94 struct mdev_device *mdev; 95 struct vfio_device_info dev_info; 96 97 const struct mdpy_type *type; 98 u32 memsize; 99 void *memblk; 100 }; 101 102 static void mdpy_create_config_space(struct mdev_state *mdev_state) 103 { 104 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID], 105 MDPY_PCI_VENDOR_ID); 106 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID], 107 MDPY_PCI_DEVICE_ID); 108 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID], 109 MDPY_PCI_SUBVENDOR_ID); 110 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID], 111 MDPY_PCI_SUBDEVICE_ID); 112 113 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND], 114 PCI_COMMAND_IO | PCI_COMMAND_MEMORY); 115 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS], 116 PCI_STATUS_CAP_LIST); 117 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE], 118 PCI_CLASS_DISPLAY_OTHER); 119 mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01; 120 121 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0], 122 PCI_BASE_ADDRESS_SPACE_MEMORY | 123 PCI_BASE_ADDRESS_MEM_TYPE_32 | 124 PCI_BASE_ADDRESS_MEM_PREFETCH); 125 mdev_state->bar_mask = ~(mdev_state->memsize) + 1; 126 127 /* vendor specific capability for the config registers */ 128 mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET; 129 mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */ 130 mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */ 131 mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE; 132 STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET], 133 mdev_state->type->format); 134 STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET], 135 mdev_state->type->width); 136 STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET], 137 mdev_state->type->height); 138 } 139 140 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, 141 char *buf, u32 count) 142 { 143 struct device *dev = mdev_dev(mdev_state->mdev); 144 u32 cfg_addr; 145 146 switch (offset) { 147 case PCI_BASE_ADDRESS_0: 148 cfg_addr = *(u32 *)buf; 149 150 if (cfg_addr == 0xffffffff) { 151 cfg_addr = (cfg_addr & mdev_state->bar_mask); 152 } else { 153 cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK; 154 if (cfg_addr) 155 dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr); 156 } 157 158 cfg_addr |= (mdev_state->vconfig[offset] & 159 ~PCI_BASE_ADDRESS_MEM_MASK); 160 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); 161 break; 162 } 163 } 164 165 static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, 166 loff_t pos, bool is_write) 167 { 168 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); 169 struct device *dev = mdev_dev(mdev); 170 int ret = 0; 171 172 mutex_lock(&mdev_state->ops_lock); 173 174 if (pos < MDPY_CONFIG_SPACE_SIZE) { 175 if (is_write) 176 handle_pci_cfg_write(mdev_state, pos, buf, count); 177 else 178 memcpy(buf, (mdev_state->vconfig + pos), count); 179 180 } else if ((pos >= MDPY_MEMORY_BAR_OFFSET) && 181 (pos + count <= 182 MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) { 183 pos -= MDPY_MEMORY_BAR_OFFSET; 184 if (is_write) 185 memcpy(mdev_state->memblk, buf, count); 186 else 187 memcpy(buf, mdev_state->memblk, count); 188 189 } else { 190 dev_info(dev, "%s: %s @0x%llx (unhandled)\n", 191 __func__, is_write ? "WR" : "RD", pos); 192 ret = -1; 193 goto accessfailed; 194 } 195 196 ret = count; 197 198 199 accessfailed: 200 mutex_unlock(&mdev_state->ops_lock); 201 202 return ret; 203 } 204 205 static int mdpy_reset(struct mdev_device *mdev) 206 { 207 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); 208 u32 stride, i; 209 210 /* initialize with gray gradient */ 211 stride = mdev_state->type->width * mdev_state->type->bytepp; 212 for (i = 0; i < mdev_state->type->height; i++) 213 memset(mdev_state->memblk + i * stride, 214 i * 255 / mdev_state->type->height, 215 stride); 216 return 0; 217 } 218 219 static int mdpy_create(struct mdev_device *mdev) 220 { 221 const struct mdpy_type *type = 222 &mdpy_types[mdev_get_type_group_id(mdev)]; 223 struct device *dev = mdev_dev(mdev); 224 struct mdev_state *mdev_state; 225 u32 fbsize; 226 227 if (mdpy_count >= max_devices) 228 return -ENOMEM; 229 230 mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); 231 if (mdev_state == NULL) 232 return -ENOMEM; 233 234 mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL); 235 if (mdev_state->vconfig == NULL) { 236 kfree(mdev_state); 237 return -ENOMEM; 238 } 239 240 fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp); 241 242 mdev_state->memblk = vmalloc_user(fbsize); 243 if (!mdev_state->memblk) { 244 kfree(mdev_state->vconfig); 245 kfree(mdev_state); 246 return -ENOMEM; 247 } 248 dev_info(dev, "%s: %s (%dx%d)\n", __func__, type->name, type->width, 249 type->height); 250 251 mutex_init(&mdev_state->ops_lock); 252 mdev_state->mdev = mdev; 253 mdev_set_drvdata(mdev, mdev_state); 254 255 mdev_state->type = type; 256 mdev_state->memsize = fbsize; 257 mdpy_create_config_space(mdev_state); 258 mdpy_reset(mdev); 259 260 mdpy_count++; 261 return 0; 262 } 263 264 static int mdpy_remove(struct mdev_device *mdev) 265 { 266 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); 267 struct device *dev = mdev_dev(mdev); 268 269 dev_info(dev, "%s\n", __func__); 270 271 mdev_set_drvdata(mdev, NULL); 272 vfree(mdev_state->memblk); 273 kfree(mdev_state->vconfig); 274 kfree(mdev_state); 275 276 mdpy_count--; 277 return 0; 278 } 279 280 static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf, 281 size_t count, loff_t *ppos) 282 { 283 unsigned int done = 0; 284 int ret; 285 286 while (count) { 287 size_t filled; 288 289 if (count >= 4 && !(*ppos % 4)) { 290 u32 val; 291 292 ret = mdev_access(mdev, (char *)&val, sizeof(val), 293 *ppos, false); 294 if (ret <= 0) 295 goto read_err; 296 297 if (copy_to_user(buf, &val, sizeof(val))) 298 goto read_err; 299 300 filled = 4; 301 } else if (count >= 2 && !(*ppos % 2)) { 302 u16 val; 303 304 ret = mdev_access(mdev, (char *)&val, sizeof(val), 305 *ppos, false); 306 if (ret <= 0) 307 goto read_err; 308 309 if (copy_to_user(buf, &val, sizeof(val))) 310 goto read_err; 311 312 filled = 2; 313 } else { 314 u8 val; 315 316 ret = mdev_access(mdev, (char *)&val, sizeof(val), 317 *ppos, false); 318 if (ret <= 0) 319 goto read_err; 320 321 if (copy_to_user(buf, &val, sizeof(val))) 322 goto read_err; 323 324 filled = 1; 325 } 326 327 count -= filled; 328 done += filled; 329 *ppos += filled; 330 buf += filled; 331 } 332 333 return done; 334 335 read_err: 336 return -EFAULT; 337 } 338 339 static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf, 340 size_t count, loff_t *ppos) 341 { 342 unsigned int done = 0; 343 int ret; 344 345 while (count) { 346 size_t filled; 347 348 if (count >= 4 && !(*ppos % 4)) { 349 u32 val; 350 351 if (copy_from_user(&val, buf, sizeof(val))) 352 goto write_err; 353 354 ret = mdev_access(mdev, (char *)&val, sizeof(val), 355 *ppos, true); 356 if (ret <= 0) 357 goto write_err; 358 359 filled = 4; 360 } else if (count >= 2 && !(*ppos % 2)) { 361 u16 val; 362 363 if (copy_from_user(&val, buf, sizeof(val))) 364 goto write_err; 365 366 ret = mdev_access(mdev, (char *)&val, sizeof(val), 367 *ppos, true); 368 if (ret <= 0) 369 goto write_err; 370 371 filled = 2; 372 } else { 373 u8 val; 374 375 if (copy_from_user(&val, buf, sizeof(val))) 376 goto write_err; 377 378 ret = mdev_access(mdev, (char *)&val, sizeof(val), 379 *ppos, true); 380 if (ret <= 0) 381 goto write_err; 382 383 filled = 1; 384 } 385 count -= filled; 386 done += filled; 387 *ppos += filled; 388 buf += filled; 389 } 390 391 return done; 392 write_err: 393 return -EFAULT; 394 } 395 396 static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) 397 { 398 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); 399 400 if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT) 401 return -EINVAL; 402 if (vma->vm_end < vma->vm_start) 403 return -EINVAL; 404 if (vma->vm_end - vma->vm_start > mdev_state->memsize) 405 return -EINVAL; 406 if ((vma->vm_flags & VM_SHARED) == 0) 407 return -EINVAL; 408 409 return remap_vmalloc_range_partial(vma, vma->vm_start, 410 mdev_state->memblk, 0, 411 vma->vm_end - vma->vm_start); 412 } 413 414 static int mdpy_get_region_info(struct mdev_device *mdev, 415 struct vfio_region_info *region_info, 416 u16 *cap_type_id, void **cap_type) 417 { 418 struct mdev_state *mdev_state; 419 420 mdev_state = mdev_get_drvdata(mdev); 421 if (!mdev_state) 422 return -EINVAL; 423 424 if (region_info->index >= VFIO_PCI_NUM_REGIONS && 425 region_info->index != MDPY_DISPLAY_REGION) 426 return -EINVAL; 427 428 switch (region_info->index) { 429 case VFIO_PCI_CONFIG_REGION_INDEX: 430 region_info->offset = 0; 431 region_info->size = MDPY_CONFIG_SPACE_SIZE; 432 region_info->flags = (VFIO_REGION_INFO_FLAG_READ | 433 VFIO_REGION_INFO_FLAG_WRITE); 434 break; 435 case VFIO_PCI_BAR0_REGION_INDEX: 436 case MDPY_DISPLAY_REGION: 437 region_info->offset = MDPY_MEMORY_BAR_OFFSET; 438 region_info->size = mdev_state->memsize; 439 region_info->flags = (VFIO_REGION_INFO_FLAG_READ | 440 VFIO_REGION_INFO_FLAG_WRITE | 441 VFIO_REGION_INFO_FLAG_MMAP); 442 break; 443 default: 444 region_info->size = 0; 445 region_info->offset = 0; 446 region_info->flags = 0; 447 } 448 449 return 0; 450 } 451 452 static int mdpy_get_irq_info(struct mdev_device *mdev, 453 struct vfio_irq_info *irq_info) 454 { 455 irq_info->count = 0; 456 return 0; 457 } 458 459 static int mdpy_get_device_info(struct mdev_device *mdev, 460 struct vfio_device_info *dev_info) 461 { 462 dev_info->flags = VFIO_DEVICE_FLAGS_PCI; 463 dev_info->num_regions = VFIO_PCI_NUM_REGIONS; 464 dev_info->num_irqs = VFIO_PCI_NUM_IRQS; 465 return 0; 466 } 467 468 static int mdpy_query_gfx_plane(struct mdev_device *mdev, 469 struct vfio_device_gfx_plane_info *plane) 470 { 471 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); 472 473 if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) { 474 if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE | 475 VFIO_GFX_PLANE_TYPE_REGION)) 476 return 0; 477 return -EINVAL; 478 } 479 480 if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION) 481 return -EINVAL; 482 483 plane->drm_format = mdev_state->type->format; 484 plane->width = mdev_state->type->width; 485 plane->height = mdev_state->type->height; 486 plane->stride = (mdev_state->type->width * 487 mdev_state->type->bytepp); 488 plane->size = mdev_state->memsize; 489 plane->region_index = MDPY_DISPLAY_REGION; 490 491 /* unused */ 492 plane->drm_format_mod = 0; 493 plane->x_pos = 0; 494 plane->y_pos = 0; 495 plane->x_hot = 0; 496 plane->y_hot = 0; 497 498 return 0; 499 } 500 501 static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd, 502 unsigned long arg) 503 { 504 int ret = 0; 505 unsigned long minsz; 506 struct mdev_state *mdev_state; 507 508 mdev_state = mdev_get_drvdata(mdev); 509 510 switch (cmd) { 511 case VFIO_DEVICE_GET_INFO: 512 { 513 struct vfio_device_info info; 514 515 minsz = offsetofend(struct vfio_device_info, num_irqs); 516 517 if (copy_from_user(&info, (void __user *)arg, minsz)) 518 return -EFAULT; 519 520 if (info.argsz < minsz) 521 return -EINVAL; 522 523 ret = mdpy_get_device_info(mdev, &info); 524 if (ret) 525 return ret; 526 527 memcpy(&mdev_state->dev_info, &info, sizeof(info)); 528 529 if (copy_to_user((void __user *)arg, &info, minsz)) 530 return -EFAULT; 531 532 return 0; 533 } 534 case VFIO_DEVICE_GET_REGION_INFO: 535 { 536 struct vfio_region_info info; 537 u16 cap_type_id = 0; 538 void *cap_type = NULL; 539 540 minsz = offsetofend(struct vfio_region_info, offset); 541 542 if (copy_from_user(&info, (void __user *)arg, minsz)) 543 return -EFAULT; 544 545 if (info.argsz < minsz) 546 return -EINVAL; 547 548 ret = mdpy_get_region_info(mdev, &info, &cap_type_id, 549 &cap_type); 550 if (ret) 551 return ret; 552 553 if (copy_to_user((void __user *)arg, &info, minsz)) 554 return -EFAULT; 555 556 return 0; 557 } 558 559 case VFIO_DEVICE_GET_IRQ_INFO: 560 { 561 struct vfio_irq_info info; 562 563 minsz = offsetofend(struct vfio_irq_info, count); 564 565 if (copy_from_user(&info, (void __user *)arg, minsz)) 566 return -EFAULT; 567 568 if ((info.argsz < minsz) || 569 (info.index >= mdev_state->dev_info.num_irqs)) 570 return -EINVAL; 571 572 ret = mdpy_get_irq_info(mdev, &info); 573 if (ret) 574 return ret; 575 576 if (copy_to_user((void __user *)arg, &info, minsz)) 577 return -EFAULT; 578 579 return 0; 580 } 581 582 case VFIO_DEVICE_QUERY_GFX_PLANE: 583 { 584 struct vfio_device_gfx_plane_info plane; 585 586 minsz = offsetofend(struct vfio_device_gfx_plane_info, 587 region_index); 588 589 if (copy_from_user(&plane, (void __user *)arg, minsz)) 590 return -EFAULT; 591 592 if (plane.argsz < minsz) 593 return -EINVAL; 594 595 ret = mdpy_query_gfx_plane(mdev, &plane); 596 if (ret) 597 return ret; 598 599 if (copy_to_user((void __user *)arg, &plane, minsz)) 600 return -EFAULT; 601 602 return 0; 603 } 604 605 case VFIO_DEVICE_SET_IRQS: 606 return -EINVAL; 607 608 case VFIO_DEVICE_RESET: 609 return mdpy_reset(mdev); 610 } 611 return -ENOTTY; 612 } 613 614 static int mdpy_open(struct mdev_device *mdev) 615 { 616 if (!try_module_get(THIS_MODULE)) 617 return -ENODEV; 618 619 return 0; 620 } 621 622 static void mdpy_close(struct mdev_device *mdev) 623 { 624 module_put(THIS_MODULE); 625 } 626 627 static ssize_t 628 resolution_show(struct device *dev, struct device_attribute *attr, 629 char *buf) 630 { 631 struct mdev_device *mdev = mdev_from_dev(dev); 632 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); 633 634 return sprintf(buf, "%dx%d\n", 635 mdev_state->type->width, 636 mdev_state->type->height); 637 } 638 static DEVICE_ATTR_RO(resolution); 639 640 static struct attribute *mdev_dev_attrs[] = { 641 &dev_attr_resolution.attr, 642 NULL, 643 }; 644 645 static const struct attribute_group mdev_dev_group = { 646 .name = "vendor", 647 .attrs = mdev_dev_attrs, 648 }; 649 650 static const struct attribute_group *mdev_dev_groups[] = { 651 &mdev_dev_group, 652 NULL, 653 }; 654 655 static ssize_t name_show(struct mdev_type *mtype, 656 struct mdev_type_attribute *attr, char *buf) 657 { 658 const struct mdpy_type *type = 659 &mdpy_types[mtype_get_type_group_id(mtype)]; 660 661 return sprintf(buf, "%s\n", type->name); 662 } 663 static MDEV_TYPE_ATTR_RO(name); 664 665 static ssize_t description_show(struct mdev_type *mtype, 666 struct mdev_type_attribute *attr, char *buf) 667 { 668 const struct mdpy_type *type = 669 &mdpy_types[mtype_get_type_group_id(mtype)]; 670 671 return sprintf(buf, "virtual display, %dx%d framebuffer\n", 672 type ? type->width : 0, 673 type ? type->height : 0); 674 } 675 static MDEV_TYPE_ATTR_RO(description); 676 677 static ssize_t available_instances_show(struct mdev_type *mtype, 678 struct mdev_type_attribute *attr, 679 char *buf) 680 { 681 return sprintf(buf, "%d\n", max_devices - mdpy_count); 682 } 683 static MDEV_TYPE_ATTR_RO(available_instances); 684 685 static ssize_t device_api_show(struct mdev_type *mtype, 686 struct mdev_type_attribute *attr, char *buf) 687 { 688 return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); 689 } 690 static MDEV_TYPE_ATTR_RO(device_api); 691 692 static struct attribute *mdev_types_attrs[] = { 693 &mdev_type_attr_name.attr, 694 &mdev_type_attr_description.attr, 695 &mdev_type_attr_device_api.attr, 696 &mdev_type_attr_available_instances.attr, 697 NULL, 698 }; 699 700 static struct attribute_group mdev_type_group1 = { 701 .name = MDPY_TYPE_1, 702 .attrs = mdev_types_attrs, 703 }; 704 705 static struct attribute_group mdev_type_group2 = { 706 .name = MDPY_TYPE_2, 707 .attrs = mdev_types_attrs, 708 }; 709 710 static struct attribute_group mdev_type_group3 = { 711 .name = MDPY_TYPE_3, 712 .attrs = mdev_types_attrs, 713 }; 714 715 static struct attribute_group *mdev_type_groups[] = { 716 &mdev_type_group1, 717 &mdev_type_group2, 718 &mdev_type_group3, 719 NULL, 720 }; 721 722 static const struct mdev_parent_ops mdev_fops = { 723 .owner = THIS_MODULE, 724 .mdev_attr_groups = mdev_dev_groups, 725 .supported_type_groups = mdev_type_groups, 726 .create = mdpy_create, 727 .remove = mdpy_remove, 728 .open = mdpy_open, 729 .release = mdpy_close, 730 .read = mdpy_read, 731 .write = mdpy_write, 732 .ioctl = mdpy_ioctl, 733 .mmap = mdpy_mmap, 734 }; 735 736 static const struct file_operations vd_fops = { 737 .owner = THIS_MODULE, 738 }; 739 740 static void mdpy_device_release(struct device *dev) 741 { 742 /* nothing */ 743 } 744 745 static int __init mdpy_dev_init(void) 746 { 747 int ret = 0; 748 749 ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME); 750 if (ret < 0) { 751 pr_err("Error: failed to register mdpy_dev, err: %d\n", ret); 752 return ret; 753 } 754 cdev_init(&mdpy_cdev, &vd_fops); 755 cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1); 756 pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt)); 757 758 mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME); 759 if (IS_ERR(mdpy_class)) { 760 pr_err("Error: failed to register mdpy_dev class\n"); 761 ret = PTR_ERR(mdpy_class); 762 goto failed1; 763 } 764 mdpy_dev.class = mdpy_class; 765 mdpy_dev.release = mdpy_device_release; 766 dev_set_name(&mdpy_dev, "%s", MDPY_NAME); 767 768 ret = device_register(&mdpy_dev); 769 if (ret) 770 goto failed2; 771 772 ret = mdev_register_device(&mdpy_dev, &mdev_fops); 773 if (ret) 774 goto failed3; 775 776 return 0; 777 778 failed3: 779 device_unregister(&mdpy_dev); 780 failed2: 781 class_destroy(mdpy_class); 782 failed1: 783 cdev_del(&mdpy_cdev); 784 unregister_chrdev_region(mdpy_devt, MINORMASK + 1); 785 return ret; 786 } 787 788 static void __exit mdpy_dev_exit(void) 789 { 790 mdpy_dev.bus = NULL; 791 mdev_unregister_device(&mdpy_dev); 792 793 device_unregister(&mdpy_dev); 794 cdev_del(&mdpy_cdev); 795 unregister_chrdev_region(mdpy_devt, MINORMASK + 1); 796 class_destroy(mdpy_class); 797 mdpy_class = NULL; 798 } 799 800 module_init(mdpy_dev_init) 801 module_exit(mdpy_dev_exit) 802