1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2020 Noralf Trønnes 4 */ 5 6 #include <linux/dma-buf.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/lz4.h> 9 #include <linux/module.h> 10 #include <linux/platform_device.h> 11 #include <linux/string_helpers.h> 12 #include <linux/usb.h> 13 #include <linux/vmalloc.h> 14 #include <linux/workqueue.h> 15 16 #include <drm/clients/drm_client_setup.h> 17 #include <drm/drm_atomic_helper.h> 18 #include <drm/drm_blend.h> 19 #include <drm/drm_damage_helper.h> 20 #include <drm/drm_debugfs.h> 21 #include <drm/drm_drv.h> 22 #include <drm/drm_fbdev_shmem.h> 23 #include <drm/drm_fourcc.h> 24 #include <drm/drm_gem_atomic_helper.h> 25 #include <drm/drm_gem_framebuffer_helper.h> 26 #include <drm/drm_gem_shmem_helper.h> 27 #include <drm/drm_managed.h> 28 #include <drm/drm_print.h> 29 #include <drm/drm_probe_helper.h> 30 #include <drm/drm_simple_kms_helper.h> 31 #include <drm/gud.h> 32 33 #include "gud_internal.h" 34 35 /* Only used internally */ 36 static const struct drm_format_info gud_drm_format_r1 = { 37 .format = GUD_DRM_FORMAT_R1, 38 .num_planes = 1, 39 .char_per_block = { 1, 0, 0 }, 40 .block_w = { 8, 0, 0 }, 41 .block_h = { 1, 0, 0 }, 42 .hsub = 1, 43 .vsub = 1, 44 }; 45 46 static const struct drm_format_info gud_drm_format_xrgb1111 = { 47 .format = GUD_DRM_FORMAT_XRGB1111, 48 .num_planes = 1, 49 .char_per_block = { 1, 0, 0 }, 50 .block_w = { 2, 0, 0 }, 51 .block_h = { 1, 0, 0 }, 52 .hsub = 1, 53 .vsub = 1, 54 }; 55 56 static int gud_usb_control_msg(struct usb_interface *intf, bool in, 57 u8 request, u16 value, void *buf, size_t len) 58 { 59 u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE; 60 u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber; 61 struct usb_device *usb = interface_to_usbdev(intf); 62 unsigned int pipe; 63 64 if (len && !buf) 65 return -EINVAL; 66 67 if (in) { 68 pipe = usb_rcvctrlpipe(usb, 0); 69 requesttype |= USB_DIR_IN; 70 } else { 71 pipe = usb_sndctrlpipe(usb, 0); 72 requesttype |= 
USB_DIR_OUT; 73 } 74 75 return usb_control_msg(usb, pipe, request, requesttype, value, 76 ifnum, buf, len, USB_CTRL_GET_TIMEOUT); 77 } 78 79 static int gud_get_display_descriptor(struct usb_interface *intf, 80 struct gud_display_descriptor_req *desc) 81 { 82 void *buf; 83 int ret; 84 85 buf = kmalloc(sizeof(*desc), GFP_KERNEL); 86 if (!buf) 87 return -ENOMEM; 88 89 ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc)); 90 memcpy(desc, buf, sizeof(*desc)); 91 kfree(buf); 92 if (ret < 0) 93 return ret; 94 if (ret != sizeof(*desc)) 95 return -EIO; 96 97 if (desc->magic != le32_to_cpu(GUD_DISPLAY_MAGIC)) 98 return -ENODATA; 99 100 DRM_DEV_DEBUG_DRIVER(&intf->dev, 101 "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n", 102 desc->version, le32_to_cpu(desc->flags), desc->compression, 103 le32_to_cpu(desc->max_buffer_size)); 104 105 if (!desc->version || !desc->max_width || !desc->max_height || 106 le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) || 107 le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height)) 108 return -EINVAL; 109 110 return 0; 111 } 112 113 static int gud_status_to_errno(u8 status) 114 { 115 switch (status) { 116 case GUD_STATUS_OK: 117 return 0; 118 case GUD_STATUS_BUSY: 119 return -EBUSY; 120 case GUD_STATUS_REQUEST_NOT_SUPPORTED: 121 return -EOPNOTSUPP; 122 case GUD_STATUS_PROTOCOL_ERROR: 123 return -EPROTO; 124 case GUD_STATUS_INVALID_PARAMETER: 125 return -EINVAL; 126 case GUD_STATUS_ERROR: 127 return -EREMOTEIO; 128 default: 129 return -EREMOTEIO; 130 } 131 } 132 133 static int gud_usb_get_status(struct usb_interface *intf) 134 { 135 int ret, status = -EIO; 136 u8 *buf; 137 138 buf = kmalloc(sizeof(*buf), GFP_KERNEL); 139 if (!buf) 140 return -ENOMEM; 141 142 ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf)); 143 if (ret == sizeof(*buf)) 144 status = gud_status_to_errno(*buf); 145 kfree(buf); 146 147 if (ret < 0) 148 return ret; 149 150 return status; 151 } 
/*
 * gud_usb_transfer - Serialized control transfer with device status handling
 * @gdrm: GUD device
 * @in: true for GET, false for SET
 * @request: Protocol request (GUD_REQ_*)
 * @index: Request index (wValue on the wire via gud_usb_control_msg())
 * @buf: Transfer buffer (must be DMA-able) or NULL
 * @len: Transfer length
 *
 * Holds @ctrl_lock across the transfer so control requests and the follow-up
 * status poll can't interleave. Returns number of bytes transferred (GET) or
 * a negative error code.
 */
static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index,
			    void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(gdrm->drm.dev);
	int idx, ret;

	drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n",
		in ? "get" : "set", request, index, len);

	/* Bail out if the device has been unplugged */
	if (!drm_dev_enter(&gdrm->drm, &idx))
		return -ENODEV;

	mutex_lock(&gdrm->ctrl_lock);

	ret = gud_usb_control_msg(intf, in, request, index, buf, len);
	/*
	 * Fetch device status when the pipe stalled (-EPIPE), or after every
	 * successful SET if the device asked for that via STATUS_ON_SET.
	 * A stall with status OK is contradictory; report it once and keep
	 * the -EPIPE error.
	 */
	if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) {
		int status;

		status = gud_usb_get_status(intf);
		if (status < 0) {
			ret = status;
		} else if (ret < 0) {
			dev_err_once(gdrm->drm.dev,
				     "Unexpected status OK for failed transfer\n");
			ret = -EPIPE;
		}
	}

	if (ret < 0) {
		drm_dbg(&gdrm->drm, "ret=%d\n", ret);
		gdrm->stats_num_errors++;	/* exported through the stats debugfs file */
	}

	mutex_unlock(&gdrm->ctrl_lock);
	drm_dev_exit(idx);

	return ret;
}

/*
 * @buf cannot be allocated on the stack (it is handed to usb_control_msg()
 * which needs a DMA-able buffer).
 * Returns number of bytes received or negative error code on failure.
 */
int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len)
{
	return gud_usb_transfer(gdrm, true, request, index, buf, max_len);
}

/*
 * @buf can be allocated on the stack or NULL (it is bounced through kmemdup()
 * so the actual transfer buffer is always DMA-able).
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len)
{
	void *trbuf = NULL;
	int ret;

	if (buf && len) {
		trbuf = kmemdup(buf, len, GFP_KERNEL);
		if (!trbuf)
			return -ENOMEM;
	}

	ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len);
	kfree(trbuf);
	if (ret < 0)
		return ret;

	/* A short write is an I/O error from the caller's point of view */
	return ret != len ? -EIO : 0;
}

/*
 * @val can be allocated on the stack (a heap bounce buffer is used for the
 * actual transfer).
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val));
	*val = *buf;
	kfree(buf);
	if (ret < 0)
		return ret;

	return ret != sizeof(*val) ? -EIO : 0;
}

/* Returns zero on success or negative error code on failure. */
int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
{
	/* gud_usb_set() copies @val to a DMA-able buffer, so stack storage is fine */
	return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}

/*
 * gud_get_properties - Fetch device properties and create matching DRM properties
 * @gdrm: GUD device
 *
 * Queries up to GUD_PROPERTIES_MAX_NUM properties. A device that reports none
 * (zero-length reply) is not an error. Only properties known to this driver
 * are kept; unknown ones are skipped so future devices keep working.
 *
 * Returns: 0 on success or negative error code on failure.
 */
static int gud_get_properties(struct gud_device *gdrm)
{
	struct gud_property_req *properties;
	unsigned int i, num_properties;
	int ret;

	properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
	if (!properties)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0,
			  properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties));
	if (ret <= 0)	/* ret == 0: no properties, return 0 below through 'out' */
		goto out;
	if (ret % sizeof(*properties)) {	/* partial record: malformed reply */
		ret = -EIO;
		goto out;
	}

	num_properties = ret / sizeof(*properties);
	ret = 0;

	/* drm-managed: freed automatically with the drm_device */
	gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties),
					GFP_KERNEL);
	if (!gdrm->properties) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_properties; i++) {
		u16 prop = le16_to_cpu(properties[i].prop);
		u64 val = le64_to_cpu(properties[i].val);

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/*
			 * DRM UAPI matches the protocol so use the value directly,
			 * but mask out any additions on future devices.
			 */
			val &= GUD_ROTATION_MASK;
			ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
								 DRM_MODE_ROTATE_0, val);
			break;
		default:
			/* New ones might show up in future devices, skip those we don't know. */
			drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop);
			continue;
		}

		if (ret)
			goto out;

		gdrm->properties[gdrm->num_properties++] = prop;
	}
out:
	kfree(properties);

	return ret;
}

/*
 * debugfs 'stats' file: transfer buffer size, error count, compression
 * support and the measured compression ratio.
 */
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct gud_device *gdrm = to_gud_device(entry->dev);
	char buf[10];

	string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
	seq_printf(m, "Max buffer size: %s\n", buf);
	seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors);

	seq_puts(m, "Compression: ");
	if (gdrm->compression & GUD_COMPRESSION_LZ4)
		seq_puts(m, " lz4");
	if (!gdrm->compression)
		seq_puts(m, " none");
	seq_puts(m, "\n");

	if (gdrm->compression) {
		u64 remainder;
		/*
		 * Ratio printed with one fractional digit.
		 * NOTE(review): assumes stats_actual_length is non-zero once
		 * compression is enabled — verify against the flush path.
		 */
		u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length,
					  &remainder);
		u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length);

		seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac);
	}

	return 0;
}

static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
	.check = gud_pipe_check,
	.update = gud_pipe_update,
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
};

static const struct drm_mode_config_funcs gud_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* Only linear buffers are supported */
static const u64 gud_pipe_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

DEFINE_DRM_GEM_FOPS(gud_fops);

static const struct drm_driver gud_drm_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	.fops			= &gud_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	DRM_FBDEV_SHMEM_DRIVER_OPS,

	.name			= "gud",
	.desc			= "Generic USB Display",
	.major			= 1,
	.minor			= 0,
};

/*
 * gud_alloc_bulk_buffer - Allocate the bulk transfer buffer and its sg table
 * @gdrm: GUD device
 *
 * Uses vmalloc_32() so the pages are 32-bit addressable. The temporary page
 * array is only needed to build the scatter-gather table and is freed again.
 * The buffer and sg table themselves are released by
 * gud_free_buffers_and_mutex() via the devm action registered in gud_probe(),
 * which also covers the partial-failure paths here.
 *
 * Returns: 0 on success or negative error code on failure.
 */
static int gud_alloc_bulk_buffer(struct gud_device *gdrm)
{
	unsigned int i, num_pages;
	struct page **pages;
	void *ptr;
	int ret;

	gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len);
	if (!gdrm->bulk_buf)
		return -ENOMEM;

	num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE);
	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
		pages[i] = vmalloc_to_page(ptr);

	ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages,
					0, gdrm->bulk_len, GFP_KERNEL);
	kfree(pages);

	return ret;
}

/*
 * devm action: tear down everything gud_probe() set up outside of drm/devm
 * managed allocations. Safe to run on partially initialized state since
 * vfree(NULL) is a no-op and the pointers are NULLed after freeing.
 */
static void gud_free_buffers_and_mutex(void *data)
{
	struct gud_device *gdrm = data;

	vfree(gdrm->compress_buf);
	gdrm->compress_buf = NULL;
	sg_free_table(&gdrm->bulk_sgt);
	vfree(gdrm->bulk_buf);
	gdrm->bulk_buf = NULL;
	mutex_destroy(&gdrm->ctrl_lock);
}

/*
 * gud_probe - Bind to a GUD display interface
 * @intf: USB interface
 * @id: Matched device id
 *
 * Fetches the display descriptor and format list, sizes the bulk transfer
 * buffer from the largest supported framebuffer (capped by the device's
 * max_buffer_size and a 64 MB sanity limit), sets up optional LZ4
 * compression, and registers the DRM device. XRGB8888 is emulated on top of
 * the best available native format when the device doesn't support it
 * directly.
 *
 * Returns: 0 on success or negative error code on failure. Cleanup of
 * everything allocated here is handled by devm/drmm and the
 * gud_free_buffers_and_mutex() action.
 */
static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct drm_format_info *xrgb8888_emulation_format = NULL;
	bool rgb565_supported = false, xrgb8888_supported = false;
	unsigned int num_formats_dev, num_formats = 0;
	struct usb_endpoint_descriptor *bulk_out;
	struct gud_display_descriptor_req desc;
	struct device *dev = &intf->dev;
	size_t max_buffer_size = 0;
	struct gud_device *gdrm;
	struct drm_device *drm;
	struct device *dma_dev;
	u8 *formats_dev;
	u32 *formats;
	int ret, i;

	ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out);
	if (ret)
		return ret;

	ret = gud_get_display_descriptor(intf, &desc);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret);
		return -ENODEV;
	}

	/* This driver implements protocol version 1 */
	if (desc.version > 1) {
		dev_err(dev, "Protocol version %u is not supported\n", desc.version);
		return -ENODEV;
	}

	gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm);
	if (IS_ERR(gdrm))
		return PTR_ERR(gdrm);

	drm = &gdrm->drm;
	drm->mode_config.funcs = &gud_mode_config_funcs;
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	gdrm->flags = le32_to_cpu(desc.flags);
	/* Only LZ4 is implemented; mask off any other advertised schemes */
	gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;

	/* Compression relies on partial updates; the combination makes no sense */
	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression)
		return -EINVAL;

	mutex_init(&gdrm->ctrl_lock);
	mutex_init(&gdrm->damage_lock);
	INIT_WORK(&gdrm->work, gud_flush_work);
	gud_clear_damage(gdrm);

	/* Registered before any buffer allocation so partial failures are cleaned up */
	ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm);
	if (ret)
		return ret;

	drm->mode_config.min_width = le32_to_cpu(desc.min_width);
	drm->mode_config.max_width = le32_to_cpu(desc.max_width);
	drm->mode_config.min_height = le32_to_cpu(desc.min_height);
	drm->mode_config.max_height = le32_to_cpu(desc.max_height);

	formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
	/* Add room for emulated XRGB8888 */
	formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
	if (!formats_dev || !formats)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM);
	if (ret < 0)
		return ret;

	num_formats_dev = ret;	/* one byte per format on the wire */
	for (i = 0; i < num_formats_dev; i++) {
		const struct drm_format_info *info;
		size_t fmt_buf_size;
		u32 format;

		format = gud_to_fourcc(formats_dev[i]);
		if (!format) {
			drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]);
			continue;
		}

		/* The two internal formats have no core drm_format_info */
		if (format == GUD_DRM_FORMAT_R1)
			info = &gud_drm_format_r1;
		else if (format == GUD_DRM_FORMAT_XRGB1111)
			info = &gud_drm_format_xrgb1111;
		else
			info = drm_format_info(format);

		/*
		 * Track the best candidate for XRGB8888 emulation. The cases
		 * are ordered from lowest to highest fidelity, and the first
		 * (lowest) match wins on purpose: later entries only take
		 * over when nothing was set yet.
		 */
		switch (format) {
		case GUD_DRM_FORMAT_R1:
			fallthrough;
		case DRM_FORMAT_R8:
			fallthrough;
		case GUD_DRM_FORMAT_XRGB1111:
			fallthrough;
		case DRM_FORMAT_RGB332:
			fallthrough;
		case DRM_FORMAT_RGB888:
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_RGB565:
			rgb565_supported = true;
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_XRGB8888:
			xrgb8888_supported = true;
			break;
		}

		/* Size the bulk buffer for the largest full-screen framebuffer */
		fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) *
			       drm->mode_config.max_height;
		max_buffer_size = max(max_buffer_size, fmt_buf_size);

		if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111)
			continue; /* Internal not for userspace */

		formats[num_formats++] = format;
	}

	if (!num_formats && !xrgb8888_emulation_format) {
		dev_err(dev, "No supported pixel formats found\n");
		return -EINVAL;
	}

	/* Prefer speed over color depth */
	if (rgb565_supported)
		drm->mode_config.preferred_depth = 16;

	if (!xrgb8888_supported && xrgb8888_emulation_format) {
		gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format;
		formats[num_formats++] = DRM_FORMAT_XRGB8888;
	}

	/* The device's own limit, if given, overrides the computed size */
	if (desc.max_buffer_size)
		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
	/* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */
	if (max_buffer_size > SZ_64M)
		max_buffer_size = SZ_64M;

	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
	gdrm->bulk_len = max_buffer_size;

	ret = gud_alloc_bulk_buffer(gdrm);
	if (ret)
		return ret;

	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
		if (!gdrm->lz4_comp_mem)
			return -ENOMEM;

		/* Worst case: compressed output as large as the input */
		gdrm->compress_buf = vmalloc(gdrm->bulk_len);
		if (!gdrm->compress_buf)
			return -ENOMEM;
	}

	ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
					   formats, num_formats,
					   gud_pipe_modifiers, NULL);
	if (ret)
		return ret;

	/* The pipe keeps its own copy; the staging arrays are no longer needed */
	devm_kfree(dev, formats);
	devm_kfree(dev, formats_dev);

	ret = gud_get_properties(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get properties (error=%d)\n", ret);
		return ret;
	}

	drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);

	ret = gud_get_connectors(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get connectors (error=%d)\n", ret);
		return ret;
	}

	drm_mode_config_reset(drm);

	usb_set_intfdata(intf, gdrm);

	dma_dev = usb_intf_get_dma_device(intf);
	if (dma_dev) {
		drm_dev_set_dma_dev(drm, dma_dev);
		put_device(dma_dev);
	} else {
		dev_warn(dev, "buffer sharing not supported"); /* not an error */
	}

	drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	drm_kms_helper_poll_init(drm);

	drm_client_setup(drm, NULL);

	return 0;
}

/*
 * USB disconnect: unplug the DRM device first so new ioctls fail fast
 * (drm_dev_enter() in the transfer path returns -ENODEV), then shut down
 * the display pipeline.
 */
static void gud_disconnect(struct usb_interface *interface)
{
	struct gud_device *gdrm = usb_get_intfdata(interface);
	struct drm_device *drm = &gdrm->drm;

	drm_dbg(drm, "%s:\n", __func__);

	drm_kms_helper_poll_fini(drm);
	drm_dev_unplug(drm);
	drm_atomic_helper_shutdown(drm);
}

static int gud_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	return drm_mode_config_helper_suspend(&gdrm->drm);
}

static int gud_resume(struct usb_interface *intf)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	/* Resume errors are ignored; also used as reset_resume */
	drm_mode_config_helper_resume(&gdrm->drm);

	return 0;
}

static const struct usb_device_id gud_id_table[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) },
	{ USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) },
	{ }
};

MODULE_DEVICE_TABLE(usb, gud_id_table);

static struct usb_driver gud_usb_driver = {
	.name = "gud",
	.probe = gud_probe,
	.disconnect = gud_disconnect,
	.id_table = gud_id_table,
	.suspend = gud_suspend,
	.resume = gud_resume,
	.reset_resume = gud_resume,
};

module_usb_driver(gud_usb_driver);

MODULE_AUTHOR("Noralf Trønnes");
MODULE_DESCRIPTION("GUD USB Display driver");
MODULE_LICENSE("Dual MIT/GPL");