// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

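/*
 * Illustrative only: the default limit of four devices can be raised at
 * module load time, e.g. "modprobe mdpy count=16".
 */
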

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static const struct mdpy_type {
	const char *name;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

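/*
 * Instantiation (illustrative sketch, not used by the driver itself):
 * once the module is loaded and the "mdpy" parent device is registered,
 * each type above shows up under the parent's mdev_supported_types
 * directory and a device is created by writing a UUID to the type's
 * "create" file, e.g. (paths may differ on your system):
 *
 *	uuid=$(uuidgen)
 *	echo "$uuid" > \
 *	  /sys/devices/virtual/mdpy/mdpy/mdev_supported_types/mdpy-xga/create
 */
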
static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static u32		mdpy_count;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

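/*
 * Build the virtual PCI config space: vendor/device IDs from mdpy-defs.h,
 * a single 32-bit prefetchable memory BAR sized to the framebuffer, and a
 * vendor-specific capability that exposes the framebuffer format, width
 * and height to the guest.
 */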
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

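/*
 * Emulate writes to BAR0.  For the standard PCI sizing handshake the
 * guest writes 0xffffffff and reads back the size mask; with e.g. a
 * 4 MiB framebuffer, bar_mask is ~0x400000 + 1 = 0xffc00000.  Any other
 * value is taken as the new BAR address and stored back into the virtual
 * config space with the low flag bits preserved.
 */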
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

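/*
 * Common, lock-protected dispatcher for all region accesses: offsets
 * below MDPY_CONFIG_SPACE_SIZE hit the virtual PCI config space, the
 * window starting at MDPY_MEMORY_BAR_OFFSET maps onto the framebuffer
 * memory, and everything else is rejected.
 */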
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

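/*
 * The framebuffer allocation is rounded up to a power of two so it can be
 * exposed as a (power-of-two sized) PCI memory BAR, which also keeps
 * bar_mask in mdpy_create_config_space() a plain two's complement of the
 * size.
 */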
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		&mdpy_types[mdev_get_type_group_id(mdev)];
	u32 fbsize;
	int ret = -ENOMEM;

	if (mdpy_count >= max_devices)
		return ret;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->name, type->width,
		 type->height);

	mdpy_count++;
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	mdpy_count--;
	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
	vfio_free_device(vdev);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}

static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

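/*
 * Illustrative userspace sketch (not part of the driver): with a VFIO
 * device fd for an mdpy instance in hand, the framebuffer can be mapped
 * by querying the display region and passing its offset to mmap().  The
 * group/container setup needed to obtain the fd is omitted here.
 *
 *	struct vfio_region_info info = {
 *		.argsz = sizeof(info),
 *		.index = MDPY_DISPLAY_REGION,
 *	};
 *	void *fb;
 *
 *	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
 *		return -1;
 *	fb = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  device_fd, info.offset);
 */
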
static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

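/*
 * Userspace (e.g. QEMU's vfio display support) is expected to probe with
 * flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_REGION first,
 * then query with flags = VFIO_GFX_PLANE_TYPE_REGION to learn the DRM
 * format, geometry and the region index to mmap for scanout.
 */
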
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

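/*
 * The "vendor" attribute group above is hooked into the driver's
 * dev_groups and shows up under each mdev device's sysfs directory,
 * e.g. /sys/bus/mdev/devices/<uuid>/vendor/resolution (exact path may
 * vary).
 */
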
static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "%s\n", type->name);
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t description_show(struct mdev_type *mtype,
				struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}
static MDEV_TYPE_ATTR_RO(description);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_description.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = MDPY_TYPE_1,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = MDPY_TYPE_2,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
	.name  = MDPY_TYPE_3,
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	&mdev_type_group3,
	NULL,
};

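/*
 * The type groups above are exported by the mdev core under the parent
 * device's mdev_supported_types/ directory (mdpy-vga, mdpy-xga, mdpy-hd),
 * each containing the name, description, device_api and
 * available_instances attributes defined above.
 */
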
static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
};

static struct mdev_driver mdpy_driver = {
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
	.supported_type_groups = mdev_type_groups,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto err_driver;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mdpy_dev, &mdpy_driver);
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_unregister(&mdpy_dev);
err_class:
	class_destroy(mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_device(&mdpy_dev);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)