xref: /linux/drivers/gpu/drm/gud/gud_drv.c (revision 0da908c291070d89482f6211dbe81d4d43c3f7cb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2020 Noralf Trønnes
4  */
5 
6 #include <linux/dma-buf.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/lz4.h>
9 #include <linux/module.h>
10 #include <linux/platform_device.h>
11 #include <linux/string_helpers.h>
12 #include <linux/usb.h>
13 #include <linux/vmalloc.h>
14 #include <linux/workqueue.h>
15 
16 #include <drm/drm_atomic_helper.h>
17 #include <drm/drm_blend.h>
18 #include <drm/drm_damage_helper.h>
19 #include <drm/drm_debugfs.h>
20 #include <drm/drm_drv.h>
21 #include <drm/drm_fbdev_generic.h>
22 #include <drm/drm_fourcc.h>
23 #include <drm/drm_gem_atomic_helper.h>
24 #include <drm/drm_gem_framebuffer_helper.h>
25 #include <drm/drm_gem_shmem_helper.h>
26 #include <drm/drm_managed.h>
27 #include <drm/drm_print.h>
28 #include <drm/drm_probe_helper.h>
29 #include <drm/drm_simple_kms_helper.h>
30 #include <drm/gud.h>
31 
32 #include "gud_internal.h"
33 
34 /* Only used internally */
35 static const struct drm_format_info gud_drm_format_r1 = {
36 	.format = GUD_DRM_FORMAT_R1,
37 	.num_planes = 1,
38 	.char_per_block = { 1, 0, 0 },
39 	.block_w = { 8, 0, 0 },
40 	.block_h = { 1, 0, 0 },
41 	.hsub = 1,
42 	.vsub = 1,
43 };
44 
45 static const struct drm_format_info gud_drm_format_xrgb1111 = {
46 	.format = GUD_DRM_FORMAT_XRGB1111,
47 	.num_planes = 1,
48 	.char_per_block = { 1, 0, 0 },
49 	.block_w = { 2, 0, 0 },
50 	.block_h = { 1, 0, 0 },
51 	.hsub = 1,
52 	.vsub = 1,
53 };
54 
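/*
 * Send a GUD vendor request to the display interface. The caller's @value
 * ends up in wValue and the interface number goes in wIndex, as required
 * for USB_RECIP_INTERFACE requests. Returns the number of bytes transferred
 * or a negative errno.
 */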
55 static int gud_usb_control_msg(struct usb_interface *intf, bool in,
56 			       u8 request, u16 value, void *buf, size_t len)
57 {
58 	u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
59 	u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
60 	struct usb_device *usb = interface_to_usbdev(intf);
61 	unsigned int pipe;
62 
63 	if (len && !buf)
64 		return -EINVAL;
65 
66 	if (in) {
67 		pipe = usb_rcvctrlpipe(usb, 0);
68 		requesttype |= USB_DIR_IN;
69 	} else {
70 		pipe = usb_sndctrlpipe(usb, 0);
71 		requesttype |= USB_DIR_OUT;
72 	}
73 
74 	return usb_control_msg(usb, pipe, request, requesttype, value,
75 			       ifnum, buf, len, USB_CTRL_GET_TIMEOUT);
76 }
77 
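/*
 * Read the display descriptor through a kmalloc'ed bounce buffer; USB
 * transfer buffers must be DMA-able and can therefore not live on the
 * caller's stack.
 */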
78 static int gud_get_display_descriptor(struct usb_interface *intf,
79 				      struct gud_display_descriptor_req *desc)
80 {
81 	void *buf;
82 	int ret;
83 
84 	buf = kmalloc(sizeof(*desc), GFP_KERNEL);
85 	if (!buf)
86 		return -ENOMEM;
87 
88 	ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc));
89 	memcpy(desc, buf, sizeof(*desc));
90 	kfree(buf);
91 	if (ret < 0)
92 		return ret;
93 	if (ret != sizeof(*desc))
94 		return -EIO;
95 
96 	if (le32_to_cpu(desc->magic) != GUD_DISPLAY_MAGIC)
97 		return -ENODATA;
98 
99 	DRM_DEV_DEBUG_DRIVER(&intf->dev,
100 			     "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n",
101 			     desc->version, le32_to_cpu(desc->flags), desc->compression,
102 			     le32_to_cpu(desc->max_buffer_size));
103 
104 	if (!desc->version || !desc->max_width || !desc->max_height ||
105 	    le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) ||
106 	    le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height))
107 		return -EINVAL;
108 
109 	return 0;
110 }
111 
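/* Translate the status byte from the GUD protocol into a kernel errno. */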
112 static int gud_status_to_errno(u8 status)
113 {
114 	switch (status) {
115 	case GUD_STATUS_OK:
116 		return 0;
117 	case GUD_STATUS_BUSY:
118 		return -EBUSY;
119 	case GUD_STATUS_REQUEST_NOT_SUPPORTED:
120 		return -EOPNOTSUPP;
121 	case GUD_STATUS_PROTOCOL_ERROR:
122 		return -EPROTO;
123 	case GUD_STATUS_INVALID_PARAMETER:
124 		return -EINVAL;
125 	case GUD_STATUS_ERROR:
126 		return -EREMOTEIO;
127 	default:
128 		return -EREMOTEIO;
129 	}
130 }
131 
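/*
 * Fetch the one-byte status from the device. Used to turn a stalled or
 * flagged control transfer into a specific errno.
 */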
132 static int gud_usb_get_status(struct usb_interface *intf)
133 {
134 	int ret, status = -EIO;
135 	u8 *buf;
136 
137 	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
138 	if (!buf)
139 		return -ENOMEM;
140 
141 	ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf));
142 	if (ret == sizeof(*buf))
143 		status = gud_status_to_errno(*buf);
144 	kfree(buf);
145 
146 	if (ret < 0)
147 		return ret;
148 
149 	return status;
150 }
151 
152 static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index,
153 			    void *buf, size_t len)
154 {
155 	struct usb_interface *intf = to_usb_interface(gdrm->drm.dev);
156 	int idx, ret;
157 
158 	drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n",
159 		in ? "get" : "set", request, index, len);
160 
161 	if (!drm_dev_enter(&gdrm->drm, &idx))
162 		return -ENODEV;
163 
164 	mutex_lock(&gdrm->ctrl_lock);
165 
166 	ret = gud_usb_control_msg(intf, in, request, index, buf, len);
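	/*
	 * A stall (-EPIPE) means the device rejected the request; fetch the
	 * status byte to map it to a specific errno. Devices that set
	 * GUD_DISPLAY_FLAG_STATUS_ON_SET expect a status read after every
	 * successful SET request as well.
	 */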
167 	if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) {
168 		int status;
169 
170 		status = gud_usb_get_status(intf);
171 		if (status < 0) {
172 			ret = status;
173 		} else if (ret < 0) {
174 			dev_err_once(gdrm->drm.dev,
175 				     "Unexpected status OK for failed transfer\n");
176 			ret = -EPIPE;
177 		}
178 	}
179 
180 	if (ret < 0) {
181 		drm_dbg(&gdrm->drm, "ret=%d\n", ret);
182 		gdrm->stats_num_errors++;
183 	}
184 
185 	mutex_unlock(&gdrm->ctrl_lock);
186 	drm_dev_exit(idx);
187 
188 	return ret;
189 }
190 
191 /*
192  * @buf cannot be allocated on the stack.
193  * Returns number of bytes received or negative error code on failure.
194  */
195 int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len)
196 {
197 	return gud_usb_transfer(gdrm, true, request, index, buf, max_len);
198 }
199 
200 /*
201  * @buf can be allocated on the stack or NULL.
202  * Returns zero on success or negative error code on failure.
203  */
204 int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len)
205 {
206 	void *trbuf = NULL;
207 	int ret;
208 
209 	if (buf && len) {
210 		trbuf = kmemdup(buf, len, GFP_KERNEL);
211 		if (!trbuf)
212 			return -ENOMEM;
213 	}
214 
215 	ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len);
216 	kfree(trbuf);
217 	if (ret < 0)
218 		return ret;
219 
220 	return ret != len ? -EIO : 0;
221 }
222 
223 /*
224  * @val can be allocated on the stack.
225  * Returns zero on success or negative error code on failure.
226  */
227 int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val)
228 {
229 	u8 *buf;
230 	int ret;
231 
232 	buf = kmalloc(sizeof(*val), GFP_KERNEL);
233 	if (!buf)
234 		return -ENOMEM;
235 
236 	ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val));
237 	*val = *buf;
238 	kfree(buf);
239 	if (ret < 0)
240 		return ret;
241 
242 	return ret != sizeof(*val) ? -EIO : 0;
243 }
244 
245 /* Returns zero on success or negative error code on failure. */
246 int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
247 {
248 	return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
249 }
250 
251 static int gud_get_properties(struct gud_device *gdrm)
252 {
253 	struct gud_property_req *properties;
254 	unsigned int i, num_properties;
255 	int ret;
256 
257 	properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
258 	if (!properties)
259 		return -ENOMEM;
260 
261 	ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0,
262 			  properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties));
263 	if (ret <= 0)
264 		goto out;
265 	if (ret % sizeof(*properties)) {
266 		ret = -EIO;
267 		goto out;
268 	}
269 
270 	num_properties = ret / sizeof(*properties);
271 	ret = 0;
272 
273 	gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties),
274 					GFP_KERNEL);
275 	if (!gdrm->properties) {
276 		ret = -ENOMEM;
277 		goto out;
278 	}
279 
280 	for (i = 0; i < num_properties; i++) {
281 		u16 prop = le16_to_cpu(properties[i].prop);
282 		u64 val = le64_to_cpu(properties[i].val);
283 
284 		switch (prop) {
285 		case GUD_PROPERTY_ROTATION:
286 			/*
287 			 * DRM UAPI matches the protocol so use the value directly,
288 			 * but mask out any additions on future devices.
289 			 */
290 			val &= GUD_ROTATION_MASK;
291 			ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
292 								 DRM_MODE_ROTATE_0, val);
293 			break;
294 		default:
295 			/* New ones might show up in future devices, skip those we don't know. */
296 			drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop);
297 			continue;
298 		}
299 
300 		if (ret)
301 			goto out;
302 
303 		gdrm->properties[gdrm->num_properties++] = prop;
304 	}
305 out:
306 	kfree(properties);
307 
308 	return ret;
309 }
310 
311 /*
312  * FIXME: Dma-buf sharing requires DMA support by the importing device.
313  *        This function is a workaround to make USB devices work as well.
314  *        See todo.rst for how to fix the issue in the dma-buf framework.
315  */
316 static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
317 {
318 	struct gud_device *gdrm = to_gud_device(drm);
319 
320 	if (!gdrm->dmadev)
321 		return ERR_PTR(-ENODEV);
322 
323 	return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
324 }
325 
326 static int gud_stats_debugfs(struct seq_file *m, void *data)
327 {
328 	struct drm_info_node *node = m->private;
329 	struct gud_device *gdrm = to_gud_device(node->minor->dev);
330 	char buf[10];
331 
332 	string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
333 	seq_printf(m, "Max buffer size: %s\n", buf);
334 	seq_printf(m, "Number of errors:  %u\n", gdrm->stats_num_errors);
335 
336 	seq_puts(m, "Compression:      ");
337 	if (gdrm->compression & GUD_COMPRESSION_LZ4)
338 		seq_puts(m, " lz4");
339 	if (!gdrm->compression)
340 		seq_puts(m, " none");
341 	seq_puts(m, "\n");
342 
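	/*
	 * Ratio of the raw update size (stats_length) to what was actually
	 * transferred (stats_actual_length), printed with one fractional
	 * digit.
	 */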
343 	if (gdrm->compression) {
344 		u64 remainder;
345 		u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length,
346 					  &remainder);
347 		u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length);
348 
349 		seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac);
350 	}
351 
352 	return 0;
353 }
354 
355 static const struct drm_info_list gud_debugfs_list[] = {
356 	{ "stats", gud_stats_debugfs, 0, NULL },
357 };
358 
359 static void gud_debugfs_init(struct drm_minor *minor)
360 {
361 	drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
362 				 minor->debugfs_root, minor);
363 }
364 
365 static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
366 	.check      = gud_pipe_check,
367 	.update	    = gud_pipe_update,
368 };
369 
370 static const struct drm_mode_config_funcs gud_mode_config_funcs = {
371 	.fb_create = drm_gem_fb_create_with_dirty,
372 	.atomic_check = drm_atomic_helper_check,
373 	.atomic_commit = drm_atomic_helper_commit,
374 };
375 
376 static const u64 gud_pipe_modifiers[] = {
377 	DRM_FORMAT_MOD_LINEAR,
378 	DRM_FORMAT_MOD_INVALID
379 };
380 
381 DEFINE_DRM_GEM_FOPS(gud_fops);
382 
383 static const struct drm_driver gud_drm_driver = {
384 	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
385 	.fops			= &gud_fops,
386 	DRM_GEM_SHMEM_DRIVER_OPS,
387 	.gem_prime_import	= gud_gem_prime_import,
388 	.debugfs_init		= gud_debugfs_init,
389 
390 	.name			= "gud",
391 	.desc			= "Generic USB Display",
392 	.date			= "20200422",
393 	.major			= 1,
394 	.minor			= 0,
395 };
396 
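/*
 * The bulk transfer buffer is vmalloc'ed, so build a scatter-gather table
 * over its pages for the USB layer instead of relying on the buffer being
 * physically contiguous.
 */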
397 static int gud_alloc_bulk_buffer(struct gud_device *gdrm)
398 {
399 	unsigned int i, num_pages;
400 	struct page **pages;
401 	void *ptr;
402 	int ret;
403 
404 	gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len);
405 	if (!gdrm->bulk_buf)
406 		return -ENOMEM;
407 
408 	num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE);
409 	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
410 	if (!pages)
411 		return -ENOMEM;
412 
413 	for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
414 		pages[i] = vmalloc_to_page(ptr);
415 
416 	ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages,
417 					0, gdrm->bulk_len, GFP_KERNEL);
418 	kfree(pages);
419 
420 	return ret;
421 }
422 
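/* devm action that releases the transfer buffers and the control lock. */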
423 static void gud_free_buffers_and_mutex(void *data)
424 {
425 	struct gud_device *gdrm = data;
426 
427 	vfree(gdrm->compress_buf);
428 	gdrm->compress_buf = NULL;
429 	sg_free_table(&gdrm->bulk_sgt);
430 	vfree(gdrm->bulk_buf);
431 	gdrm->bulk_buf = NULL;
432 	mutex_destroy(&gdrm->ctrl_lock);
433 }
434 
435 static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
436 {
437 	const struct drm_format_info *xrgb8888_emulation_format = NULL;
438 	bool rgb565_supported = false, xrgb8888_supported = false;
439 	unsigned int num_formats_dev, num_formats = 0;
440 	struct usb_endpoint_descriptor *bulk_out;
441 	struct gud_display_descriptor_req desc;
442 	struct device *dev = &intf->dev;
443 	size_t max_buffer_size = 0;
444 	struct gud_device *gdrm;
445 	struct drm_device *drm;
446 	u8 *formats_dev;
447 	u32 *formats;
448 	int ret, i;
449 
450 	ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out);
451 	if (ret)
452 		return ret;
453 
454 	ret = gud_get_display_descriptor(intf, &desc);
455 	if (ret) {
456 		DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret);
457 		return -ENODEV;
458 	}
459 
460 	if (desc.version > 1) {
461 		dev_err(dev, "Protocol version %u is not supported\n", desc.version);
462 		return -ENODEV;
463 	}
464 
465 	gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm);
466 	if (IS_ERR(gdrm))
467 		return PTR_ERR(gdrm);
468 
469 	drm = &gdrm->drm;
470 	drm->mode_config.funcs = &gud_mode_config_funcs;
471 	ret = drmm_mode_config_init(drm);
472 	if (ret)
473 		return ret;
474 
475 	gdrm->flags = le32_to_cpu(desc.flags);
476 	gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;
477 
478 	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression)
479 		return -EINVAL;
480 
481 	mutex_init(&gdrm->ctrl_lock);
482 	mutex_init(&gdrm->damage_lock);
483 	INIT_WORK(&gdrm->work, gud_flush_work);
484 	gud_clear_damage(gdrm);
485 
486 	ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm);
487 	if (ret)
488 		return ret;
489 
490 	drm->mode_config.min_width = le32_to_cpu(desc.min_width);
491 	drm->mode_config.max_width = le32_to_cpu(desc.max_width);
492 	drm->mode_config.min_height = le32_to_cpu(desc.min_height);
493 	drm->mode_config.max_height = le32_to_cpu(desc.max_height);
494 
495 	formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
496 	/* Add room for emulated XRGB8888 */
497 	formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
498 	if (!formats_dev || !formats)
499 		return -ENOMEM;
500 
501 	ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM);
502 	if (ret < 0)
503 		return ret;
504 
505 	num_formats_dev = ret;
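	/*
	 * Walk the pixel formats reported by the device: translate them to
	 * DRM fourccs, pick a fallback format for XRGB8888 emulation, and
	 * size the transfer buffer for the largest possible framebuffer.
	 */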
506 	for (i = 0; i < num_formats_dev; i++) {
507 		const struct drm_format_info *info;
508 		size_t fmt_buf_size;
509 		u32 format;
510 
511 		format = gud_to_fourcc(formats_dev[i]);
512 		if (!format) {
513 			drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]);
514 			continue;
515 		}
516 
517 		if (format == GUD_DRM_FORMAT_R1)
518 			info = &gud_drm_format_r1;
519 		else if (format == GUD_DRM_FORMAT_XRGB1111)
520 			info = &gud_drm_format_xrgb1111;
521 		else
522 			info = drm_format_info(format);
523 
524 		switch (format) {
525 		case GUD_DRM_FORMAT_R1:
526 			fallthrough;
527 		case DRM_FORMAT_R8:
528 			fallthrough;
529 		case GUD_DRM_FORMAT_XRGB1111:
530 			fallthrough;
531 		case DRM_FORMAT_RGB332:
532 			fallthrough;
533 		case DRM_FORMAT_RGB888:
534 			if (!xrgb8888_emulation_format)
535 				xrgb8888_emulation_format = info;
536 			break;
537 		case DRM_FORMAT_RGB565:
538 			rgb565_supported = true;
539 			if (!xrgb8888_emulation_format)
540 				xrgb8888_emulation_format = info;
541 			break;
542 		case DRM_FORMAT_XRGB8888:
543 			xrgb8888_supported = true;
544 			break;
545 		}
546 
547 		fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) *
548 			       drm->mode_config.max_height;
549 		max_buffer_size = max(max_buffer_size, fmt_buf_size);
550 
551 		if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111)
552 			continue; /* Internal not for userspace */
553 
554 		formats[num_formats++] = format;
555 	}
556 
557 	if (!num_formats && !xrgb8888_emulation_format) {
558 		dev_err(dev, "No supported pixel formats found\n");
559 		return -EINVAL;
560 	}
561 
562 	/* Prefer speed over color depth */
563 	if (rgb565_supported)
564 		drm->mode_config.preferred_depth = 16;
565 
566 	if (!xrgb8888_supported && xrgb8888_emulation_format) {
567 		gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format;
568 		formats[num_formats++] = DRM_FORMAT_XRGB8888;
569 	}
570 
571 	if (desc.max_buffer_size)
572 		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
573 	/* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */
574 	if (max_buffer_size > SZ_64M)
575 		max_buffer_size = SZ_64M;
576 
577 	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
578 	gdrm->bulk_len = max_buffer_size;
579 
580 	ret = gud_alloc_bulk_buffer(gdrm);
581 	if (ret)
582 		return ret;
583 
584 	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
585 		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
586 		if (!gdrm->lz4_comp_mem)
587 			return -ENOMEM;
588 
589 		gdrm->compress_buf = vmalloc(gdrm->bulk_len);
590 		if (!gdrm->compress_buf)
591 			return -ENOMEM;
592 	}
593 
594 	ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
595 					   formats, num_formats,
596 					   gud_pipe_modifiers, NULL);
597 	if (ret)
598 		return ret;
599 
600 	devm_kfree(dev, formats);
601 	devm_kfree(dev, formats_dev);
602 
603 	ret = gud_get_properties(gdrm);
604 	if (ret) {
605 		dev_err(dev, "Failed to get properties (error=%d)\n", ret);
606 		return ret;
607 	}
608 
609 	drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);
610 
611 	ret = gud_get_connectors(gdrm);
612 	if (ret) {
613 		dev_err(dev, "Failed to get connectors (error=%d)\n", ret);
614 		return ret;
615 	}
616 
617 	drm_mode_config_reset(drm);
618 
619 	usb_set_intfdata(intf, gdrm);
620 
621 	gdrm->dmadev = usb_intf_get_dma_device(intf);
622 	if (!gdrm->dmadev)
623 		dev_warn(dev, "buffer sharing not supported\n");
624 
625 	ret = drm_dev_register(drm, 0);
626 	if (ret) {
627 		put_device(gdrm->dmadev);
628 		return ret;
629 	}
630 
631 	drm_kms_helper_poll_init(drm);
632 
633 	drm_fbdev_generic_setup(drm, 0);
634 
635 	return 0;
636 }
637 
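/*
 * The USB device is gone at this point; drm_dev_unplug() makes sure that
 * subsequent hardware access fails through the drm_dev_enter() guards.
 */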
638 static void gud_disconnect(struct usb_interface *interface)
639 {
640 	struct gud_device *gdrm = usb_get_intfdata(interface);
641 	struct drm_device *drm = &gdrm->drm;
642 
643 	drm_dbg(drm, "%s:\n", __func__);
644 
645 	drm_kms_helper_poll_fini(drm);
646 	drm_dev_unplug(drm);
647 	drm_atomic_helper_shutdown(drm);
648 	put_device(gdrm->dmadev);
649 	gdrm->dmadev = NULL;
650 }
651 
652 static int gud_suspend(struct usb_interface *intf, pm_message_t message)
653 {
654 	struct gud_device *gdrm = usb_get_intfdata(intf);
655 
656 	return drm_mode_config_helper_suspend(&gdrm->drm);
657 }
658 
659 static int gud_resume(struct usb_interface *intf)
660 {
661 	struct gud_device *gdrm = usb_get_intfdata(intf);
662 
663 	drm_mode_config_helper_resume(&gdrm->drm);
664 
665 	return 0;
666 }
667 
668 static const struct usb_device_id gud_id_table[] = {
669 	{ USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) },
670 	{ USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) },
671 	{ }
672 };
673 
674 MODULE_DEVICE_TABLE(usb, gud_id_table);
675 
676 static struct usb_driver gud_usb_driver = {
677 	.name		= "gud",
678 	.probe		= gud_probe,
679 	.disconnect	= gud_disconnect,
680 	.id_table	= gud_id_table,
681 	.suspend	= gud_suspend,
682 	.resume		= gud_resume,
683 	.reset_resume	= gud_resume,
684 };
685 
686 module_usb_driver(gud_usb_driver);
687 
688 MODULE_AUTHOR("Noralf Trønnes");
689 MODULE_LICENSE("Dual MIT/GPL");
690