// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/lz4.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string_helpers.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * These formats are only used internally and are not exposed to userspace.
 * R1 packs 8 monochrome pixels per byte, XRGB1111 packs 2 pixels per byte.
 */
static const struct drm_format_info gud_drm_format_r1 = {
	.format = GUD_DRM_FORMAT_R1,
	.num_planes = 1,
	.char_per_block = { 1, 0, 0 },
	.block_w = { 8, 0, 0 },
	.block_h = { 1, 0, 0 },
	.hsub = 1,
	.vsub = 1,
};

static const struct drm_format_info gud_drm_format_xrgb1111 = {
	.format = GUD_DRM_FORMAT_XRGB1111,
	.num_planes = 1,
	.char_per_block = { 1, 0, 0 },
	.block_w = { 2, 0, 0 },
	.block_h = { 1, 0, 0 },
	.hsub = 1,
	.vsub = 1,
};

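/*
 * Send a GUD vendor control request on the display interface. @value carries
 * the request-specific value and the interface number is passed in wIndex,
 * since the protocol addresses requests to the interface. Returns the
 * usb_control_msg() result: bytes transferred or a negative error code.
 */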
static int gud_usb_control_msg(struct usb_interface *intf, bool in,
			       u8 request, u16 value, void *buf, size_t len)
{
	u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	struct usb_device *usb = interface_to_usbdev(intf);
	unsigned int pipe;

	if (len && !buf)
		return -EINVAL;

	if (in) {
		pipe = usb_rcvctrlpipe(usb, 0);
		requesttype |= USB_DIR_IN;
	} else {
		pipe = usb_sndctrlpipe(usb, 0);
		requesttype |= USB_DIR_OUT;
	}

	return usb_control_msg(usb, pipe, request, requesttype, value,
			       ifnum, buf, len, USB_CTRL_GET_TIMEOUT);
}

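/*
 * Fetch and validate the display descriptor. The transfer goes through a
 * kmalloc'ed bounce buffer since USB transfer buffers must be DMA-able and
 * the caller's descriptor may live on the stack.
 */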
static int gud_get_display_descriptor(struct usb_interface *intf,
				      struct gud_display_descriptor_req *desc)
{
	void *buf;
	int ret;

	buf = kmalloc(sizeof(*desc), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc));
	memcpy(desc, buf, sizeof(*desc));
	kfree(buf);
	if (ret < 0)
		return ret;
	if (ret != sizeof(*desc))
		return -EIO;

	if (le32_to_cpu(desc->magic) != GUD_DISPLAY_MAGIC)
		return -ENODATA;

	DRM_DEV_DEBUG_DRIVER(&intf->dev,
			     "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n",
			     desc->version, le32_to_cpu(desc->flags), desc->compression,
			     le32_to_cpu(desc->max_buffer_size));

	if (!desc->version || !desc->max_width || !desc->max_height ||
	    le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) ||
	    le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height))
		return -EINVAL;

	return 0;
}

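/* Map a GUD protocol status byte to a kernel error code. */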
static int gud_status_to_errno(u8 status)
{
	switch (status) {
	case GUD_STATUS_OK:
		return 0;
	case GUD_STATUS_BUSY:
		return -EBUSY;
	case GUD_STATUS_REQUEST_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case GUD_STATUS_PROTOCOL_ERROR:
		return -EPROTO;
	case GUD_STATUS_INVALID_PARAMETER:
		return -EINVAL;
	case GUD_STATUS_ERROR:
		return -EREMOTEIO;
	default:
		return -EREMOTEIO;
	}
}

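/*
 * Read the status of the previous request. Returns zero if the device reports
 * OK, a negative errno mapped from the device status otherwise, or the USB
 * error if the status request itself failed.
 */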
static int gud_usb_get_status(struct usb_interface *intf)
{
	int ret, status = -EIO;
	u8 *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf));
	if (ret == sizeof(*buf))
		status = gud_status_to_errno(*buf);
	kfree(buf);

	if (ret < 0)
		return ret;

	return status;
}

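/*
 * Perform a control transfer under ctrl_lock while holding a drm_dev_enter()
 * reference so an unplugged device fails fast with -ENODEV. A stalled pipe
 * (-EPIPE), or any successful SET request when the device has set
 * GUD_DISPLAY_FLAG_STATUS_ON_SET, is followed by a status request to fetch
 * the real result from the device.
 */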
static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index,
			    void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(gdrm->drm.dev);
	int idx, ret;

	drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n",
		in ? "get" : "set", request, index, len);

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return -ENODEV;

	mutex_lock(&gdrm->ctrl_lock);

	ret = gud_usb_control_msg(intf, in, request, index, buf, len);
	if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) {
		int status;

		status = gud_usb_get_status(intf);
		if (status < 0) {
			ret = status;
		} else if (ret < 0) {
			dev_err_once(gdrm->drm.dev,
				     "Unexpected status OK for failed transfer\n");
			ret = -EPIPE;
		}
	}

	if (ret < 0) {
		drm_dbg(&gdrm->drm, "ret=%d\n", ret);
		gdrm->stats_num_errors++;
	}

	mutex_unlock(&gdrm->ctrl_lock);
	drm_dev_exit(idx);

	return ret;
}

/*
 * @buf cannot be allocated on the stack since USB transfer buffers may be
 * DMA-mapped directly.
 * Returns the number of bytes received or a negative error code on failure.
 */
int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len)
{
	return gud_usb_transfer(gdrm, true, request, index, buf, max_len);
}

/*
 * @buf can be allocated on the stack or NULL.
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len)
{
	void *trbuf = NULL;
	int ret;

	if (buf && len) {
		trbuf = kmemdup(buf, len, GFP_KERNEL);
		if (!trbuf)
			return -ENOMEM;
	}

	ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len);
	kfree(trbuf);
	if (ret < 0)
		return ret;

	return ret != len ? -EIO : 0;
}

/*
 * @val can be allocated on the stack.
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val));
	*val = *buf;
	kfree(buf);
	if (ret < 0)
		return ret;

	return ret != sizeof(*val) ? -EIO : 0;
}

/* Returns zero on success or negative error code on failure. */
int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
{
	return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}

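/*
 * Fetch the device's plane properties and create the matching DRM properties.
 * Only the rotation property is handled here; unknown properties are skipped
 * so future devices keep working.
 */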
static int gud_get_properties(struct gud_device *gdrm)
{
	struct gud_property_req *properties;
	unsigned int i, num_properties;
	int ret;

	properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
	if (!properties)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0,
			  properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties));
	if (ret <= 0)
		goto out;
	if (ret % sizeof(*properties)) {
		ret = -EIO;
		goto out;
	}

	num_properties = ret / sizeof(*properties);
	ret = 0;

	gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties),
					GFP_KERNEL);
	if (!gdrm->properties) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_properties; i++) {
		u16 prop = le16_to_cpu(properties[i].prop);
		u64 val = le64_to_cpu(properties[i].val);

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/*
			 * DRM UAPI matches the protocol so use the value directly,
			 * but mask out any additions on future devices.
			 */
			val &= GUD_ROTATION_MASK;
			ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
								 DRM_MODE_ROTATE_0, val);
			break;
		default:
			/* New ones might show up in future devices, skip those we don't know. */
			drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop);
			continue;
		}

		if (ret)
			goto out;

		gdrm->properties[gdrm->num_properties++] = prop;
	}
out:
	kfree(properties);

	return ret;
}

/*
 * FIXME: Dma-buf sharing requires DMA support by the importing device.
 *        This function is a workaround to make USB devices work as well.
 *        See todo.rst for how to fix the issue in the dma-buf framework.
 */
static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
{
	struct gud_device *gdrm = to_gud_device(drm);

	if (!gdrm->dmadev)
		return ERR_PTR(-ENODEV);

	return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
}

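/*
 * debugfs "stats" file: dump the max transfer buffer size, the error count
 * and, when compression is enabled, the achieved compression ratio with one
 * decimal of precision.
 */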
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct gud_device *gdrm = to_gud_device(entry->dev);
	char buf[10];

	string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
	seq_printf(m, "Max buffer size: %s\n", buf);
	seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors);

	seq_puts(m, "Compression: ");
	if (gdrm->compression & GUD_COMPRESSION_LZ4)
		seq_puts(m, " lz4");
	if (!gdrm->compression)
		seq_puts(m, " none");
	seq_puts(m, "\n");

	if (gdrm->compression) {
		u64 remainder;
		u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length,
					  &remainder);
		u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length);

		seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac);
	}

	return 0;
}

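/*
 * The device is driven as a single drm_simple_display_pipe (one plane, one
 * CRTC, one encoder). The shadow-plane helpers vmap the shmem framebuffer so
 * the update path can read the pixels on the CPU before sending them over USB.
 */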
static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
	.check = gud_pipe_check,
	.update = gud_pipe_update,
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
};

static const struct drm_mode_config_funcs gud_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const u64 gud_pipe_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

DEFINE_DRM_GEM_FOPS(gud_fops);

static const struct drm_driver gud_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	.fops = &gud_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.gem_prime_import = gud_gem_prime_import,
	DRM_FBDEV_SHMEM_DRIVER_OPS,

	.name = "gud",
	.desc = "Generic USB Display",
	.major = 1,
	.minor = 0,
};

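/*
 * Allocate the bulk transfer buffer. vmalloc_32() keeps the pages 32-bit
 * addressable and a scatter-gather table is built over them so the USB layer
 * can DMA-map the vmalloc'ed buffer.
 */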
static int gud_alloc_bulk_buffer(struct gud_device *gdrm)
{
	unsigned int i, num_pages;
	struct page **pages;
	void *ptr;
	int ret;

	gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len);
	if (!gdrm->bulk_buf)
		return -ENOMEM;

	num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE);
	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
		pages[i] = vmalloc_to_page(ptr);

	ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages,
					0, gdrm->bulk_len, GFP_KERNEL);
	kfree(pages);

	return ret;
}

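/*
 * devm action run on unbind: free the transfer and compression buffers and
 * destroy the control lock once all users are gone.
 */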
static void gud_free_buffers_and_mutex(void *data)
{
	struct gud_device *gdrm = data;

	vfree(gdrm->compress_buf);
	gdrm->compress_buf = NULL;
	sg_free_table(&gdrm->bulk_sgt);
	vfree(gdrm->bulk_buf);
	gdrm->bulk_buf = NULL;
	mutex_destroy(&gdrm->ctrl_lock);
}

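/*
 * Probe: read and validate the display descriptor, build the DRM format list
 * (with optional XRGB8888 emulation on top of a native format), size and
 * allocate the bulk/compression buffers, and register the DRM device.
 */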
static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct drm_format_info *xrgb8888_emulation_format = NULL;
	bool rgb565_supported = false, xrgb8888_supported = false;
	unsigned int num_formats_dev, num_formats = 0;
	struct usb_endpoint_descriptor *bulk_out;
	struct gud_display_descriptor_req desc;
	struct device *dev = &intf->dev;
	size_t max_buffer_size = 0;
	struct gud_device *gdrm;
	struct drm_device *drm;
	u8 *formats_dev;
	u32 *formats;
	int ret, i;

	ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out);
	if (ret)
		return ret;

	ret = gud_get_display_descriptor(intf, &desc);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret);
		return -ENODEV;
	}

	if (desc.version > 1) {
		dev_err(dev, "Protocol version %u is not supported\n", desc.version);
		return -ENODEV;
	}

	gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm);
	if (IS_ERR(gdrm))
		return PTR_ERR(gdrm);

	drm = &gdrm->drm;
	drm->mode_config.funcs = &gud_mode_config_funcs;
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	gdrm->flags = le32_to_cpu(desc.flags);
	gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;

	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression)
		return -EINVAL;

	mutex_init(&gdrm->ctrl_lock);
	mutex_init(&gdrm->damage_lock);
	INIT_WORK(&gdrm->work, gud_flush_work);
	gud_clear_damage(gdrm);

	ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm);
	if (ret)
		return ret;

	drm->mode_config.min_width = le32_to_cpu(desc.min_width);
	drm->mode_config.max_width = le32_to_cpu(desc.max_width);
	drm->mode_config.min_height = le32_to_cpu(desc.min_height);
	drm->mode_config.max_height = le32_to_cpu(desc.max_height);

	formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
	/* Add room for emulated XRGB8888 */
	formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
	if (!formats_dev || !formats)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM);
	if (ret < 0)
		return ret;

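	/*
	 * Walk the formats reported by the device: map each one to a DRM
	 * fourcc, remember a format that can back XRGB8888 emulation, and
	 * track the largest framebuffer any of them needs.
	 */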
	num_formats_dev = ret;
	for (i = 0; i < num_formats_dev; i++) {
		const struct drm_format_info *info;
		size_t fmt_buf_size;
		u32 format;

		format = gud_to_fourcc(formats_dev[i]);
		if (!format) {
			drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]);
			continue;
		}

		if (format == GUD_DRM_FORMAT_R1)
			info = &gud_drm_format_r1;
		else if (format == GUD_DRM_FORMAT_XRGB1111)
			info = &gud_drm_format_xrgb1111;
		else
			info = drm_format_info(format);

		switch (format) {
		case GUD_DRM_FORMAT_R1:
			fallthrough;
		case DRM_FORMAT_R8:
			fallthrough;
		case GUD_DRM_FORMAT_XRGB1111:
			fallthrough;
		case DRM_FORMAT_RGB332:
			fallthrough;
		case DRM_FORMAT_RGB888:
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_RGB565:
			rgb565_supported = true;
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_XRGB8888:
			xrgb8888_supported = true;
			break;
		}

		fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) *
			       drm->mode_config.max_height;
		max_buffer_size = max(max_buffer_size, fmt_buf_size);

		if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111)
			continue; /* Internal not for userspace */

		formats[num_formats++] = format;
	}

	if (!num_formats && !xrgb8888_emulation_format) {
		dev_err(dev, "No supported pixel formats found\n");
		return -EINVAL;
	}

	/* Prefer speed over color depth */
	if (rgb565_supported)
		drm->mode_config.preferred_depth = 16;

	if (!xrgb8888_supported && xrgb8888_emulation_format) {
		gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format;
		formats[num_formats++] = DRM_FORMAT_XRGB8888;
	}

	if (desc.max_buffer_size)
		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
	/* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */
	if (max_buffer_size > SZ_64M)
		max_buffer_size = SZ_64M;

	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
	gdrm->bulk_len = max_buffer_size;

	ret = gud_alloc_bulk_buffer(gdrm);
	if (ret)
		return ret;

	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
		if (!gdrm->lz4_comp_mem)
			return -ENOMEM;

		gdrm->compress_buf = vmalloc(gdrm->bulk_len);
		if (!gdrm->compress_buf)
			return -ENOMEM;
	}

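	/*
	 * Register the single plane/CRTC/encoder pipeline. The plane init
	 * copies the format and modifier arrays, so the devm allocated
	 * scratch arrays can be released right away.
	 */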
	ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
					   formats, num_formats,
					   gud_pipe_modifiers, NULL);
	if (ret)
		return ret;

	devm_kfree(dev, formats);
	devm_kfree(dev, formats_dev);

	ret = gud_get_properties(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get properties (error=%d)\n", ret);
		return ret;
	}

	drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);

	ret = gud_get_connectors(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get connectors (error=%d)\n", ret);
		return ret;
	}

	drm_mode_config_reset(drm);

	usb_set_intfdata(intf, gdrm);

	gdrm->dmadev = usb_intf_get_dma_device(intf);
	if (!gdrm->dmadev)
		dev_warn(dev, "buffer sharing not supported");

	drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);

	ret = drm_dev_register(drm, 0);
	if (ret) {
		put_device(gdrm->dmadev);
		return ret;
	}

	drm_kms_helper_poll_init(drm);

	drm_client_setup(drm, NULL);

	return 0;
}

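/*
 * USB disconnect: stop connector polling, mark the DRM device unplugged so
 * further device access fails with -ENODEV, and shut down the atomic state.
 */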
static void gud_disconnect(struct usb_interface *interface)
{
	struct gud_device *gdrm = usb_get_intfdata(interface);
	struct drm_device *drm = &gdrm->drm;

	drm_dbg(drm, "%s:\n", __func__);

	drm_kms_helper_poll_fini(drm);
	drm_dev_unplug(drm);
	drm_atomic_helper_shutdown(drm);
	put_device(gdrm->dmadev);
	gdrm->dmadev = NULL;
}

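/*
 * Suspend/resume defer to the mode config helpers, which save and restore the
 * atomic display state; gud_resume() also serves as the reset_resume handler.
 */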
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	return drm_mode_config_helper_suspend(&gdrm->drm);
}

static int gud_resume(struct usb_interface *intf)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	drm_mode_config_helper_resume(&gdrm->drm);

	return 0;
}

static const struct usb_device_id gud_id_table[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) },
	{ USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) },
	{ }
};

MODULE_DEVICE_TABLE(usb, gud_id_table);

static struct usb_driver gud_usb_driver = {
	.name = "gud",
	.probe = gud_probe,
	.disconnect = gud_disconnect,
	.id_table = gud_id_table,
	.suspend = gud_suspend,
	.resume = gud_resume,
	.reset_resume = gud_resume,
};

module_usb_driver(gud_usb_driver);

MODULE_AUTHOR("Noralf Trønnes");
MODULE_DESCRIPTION("GUD USB Display driver");
MODULE_LICENSE("Dual MIT/GPL");