/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <linux/virtio_dma_buf.h>
#include <drm/drm_managed.h>
#include <drm/drm_panic.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
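
/*
 * Translate a DRM fourcc into the matching VIRTIO_GPU_FORMAT_* value.
 * Returns 0 (and warns) for anything not covered by the format lists above.
 */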
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

static struct
drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
	struct virtio_gpu_plane_state *new;

	if (WARN_ON(!plane->state))
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);

	return &new->base;
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										  plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_plane_state->fb != new_plane_state->fb)
		new_plane_state->ignore_damage_clips = true;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}

/* For drm panic */
static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
					   struct drm_plane_state *state,
					   struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_panic_array_alloc();
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
							objs);
}

static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}
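
/*
 * Resource flush helpers: ask the host to (re)present a rectangle of the
 * resource currently being scanned out. The drm_panic variant below avoids
 * fences and the regular notification path so it can run from the panic
 * handler.
 */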
/* For drm_panic */
static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
					    uint32_t x, uint32_t y,
					    uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

	virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					    width, height);
	virtio_gpu_panic_notify(vgdev);
}

static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	vgplane_st = to_virtio_gpu_plane_state(plane->state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgplane_st->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs,
					      vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
				       msecs_to_jiffies(50));
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);
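
		/*
		 * Blob resources (host3d or guest) are scanned out with
		 * SET_SCANOUT_BLOB, which also takes the framebuffer so the
		 * host learns its layout; other resources are scanned out by
		 * resource handle with the plain SET_SCANOUT command.
		 */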
		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}

static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
					   struct drm_plane_state *new_state,
					   struct drm_gem_object *obj)
{
	struct virtio_gpu_device *vgdev = plane->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct dma_buf_attachment *attach = obj->import_attach;
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret) {
		dma_resv_unlock(resv);
		return ret;
	}

	if (!bo->sgt) {
		ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
						 bo, attach);
		if (ret)
			goto err;

		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	dma_resv_unlock(resv);
	return 0;

err:
	dma_buf_unpin(attach);
	dma_resv_unlock(resv);
	return ret;
}

static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	vgplane_st = to_virtio_gpu_plane_state(new_state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	obj = new_state->fb->obj[0];
	if (bo->dumb || obj->import_attach) {
		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
							   vgdev->fence_drv.context,
							   0);
		if (!vgplane_st->fence)
			return -ENOMEM;
	}

	if (obj->import_attach) {
		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
		if (ret)
			goto err_fence;
	}

	return 0;

err_fence:
	if (vgplane_st->fence) {
		dma_fence_put(&vgplane_st->fence->f);
		vgplane_st->fence = NULL;
	}

	return ret;
}

static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach = obj->import_attach;
	struct dma_resv *resv = attach->dmabuf->resv;

	dma_resv_lock(resv, NULL);
	dma_buf_unpin(attach);
	dma_resv_unlock(resv);
}

static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_plane_state *vgplane_st;
	struct drm_gem_object *obj;

	if (!state->fb)
		return;

	vgplane_st = to_virtio_gpu_plane_state(state);
	if (vgplane_st->fence) {
		dma_fence_put(&vgplane_st->fence->f);
		vgplane_st->fence = NULL;
	}

	obj = state->fb->obj[0];
	if (obj->import_attach)
		virtio_gpu_cleanup_imported_obj(obj);
}
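
/*
 * Cursor plane update: for dumb BOs, upload the new cursor image first and
 * wait for the transfer to finish, then send UPDATE_CURSOR when the
 * framebuffer changed or MOVE_CURSOR when only the position did.
 */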
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		vgplane_st = to_virtio_gpu_plane_state(plane->state);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgplane_st->fence->f, true);
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->hotspot_x,
			  plane->state->hotspot_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->hotspot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->hotspot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
					 struct drm_scanout_buffer *sb)
{
	struct virtio_gpu_object *bo;

	if (!plane->state || !plane->state->fb || !plane->state->visible)
		return -ENODEV;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);

	/* Only support mapped shmem bo */
	if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
		return -ENODEV;

	iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);

	sb->format = plane->state->fb->format;
	sb->height = plane->state->fb->height;
	sb->width = plane->state->fb->width;
	sb->pitch[0] = plane->state->fb->pitches[0];
	return 0;
}
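
/*
 * Called by drm_panic after the panic screen has been drawn into the buffer
 * returned by virtio_drm_get_scanout_buffer(): push the dumb BO contents to
 * the host (if needed) and flush the whole plane.
 */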
static void virtio_panic_flush(struct drm_plane *plane)
{
	struct virtio_gpu_object *bo;
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_rect rect;

	rect.x1 = 0;
	rect.y1 = 0;
	rect.x2 = plane->state->fb->width;
	rect.y2 = plane->state->fb->height;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);

	if (bo->dumb) {
		if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
						    &rect))
			return;
	}

	virtio_gpu_panic_resource_flush(plane,
					plane->state->src_x >> 16,
					plane->state->src_y >> 16,
					plane->state->src_w >> 16,
					plane->state->src_h >> 16);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_primary_plane_update,
	.get_scanout_buffer = virtio_drm_get_scanout_buffer,
	.panic_flush = virtio_panic_flush,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);

	if (type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_enable_fb_damage_clips(plane);

	return plane;
}