/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <linux/virtio_dma_buf.h>
#include <drm/drm_managed.h>
#include <drm/drm_panic.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

static struct
drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
	struct virtio_gpu_plane_state *new;

	if (WARN_ON(!plane->state))
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);

	return &new->base;
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										  plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_plane_state->fb != new_plane_state->fb)
		new_plane_state->ignore_damage_clips = true;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}

/* For drm panic */
static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
					   struct drm_plane_state *state,
					   struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_panic_array_alloc();
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
							objs);
}

static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

/* For drm_panic */
static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
					    uint32_t x, uint32_t y,
					    uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

	virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					    width, height);
	virtio_gpu_panic_notify(vgdev);
}

static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	vgplane_st = to_virtio_gpu_plane_state(plane->state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgplane_st->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs,
					      vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
				       msecs_to_jiffies(50));
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									    plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}

static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
					   struct drm_plane_state *new_state,
					   struct drm_gem_object *obj)
{
	struct virtio_gpu_device *vgdev = plane->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct dma_buf_attachment *attach = obj->import_attach;
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret) {
		dma_resv_unlock(resv);
		return ret;
	}

	if (!bo->sgt) {
		ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
						 bo, attach);
		if (ret)
			goto err;

		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	dma_resv_unlock(resv);
	return 0;

err:
	dma_buf_unpin(attach);
	dma_resv_unlock(resv);
	return ret;
}

static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	vgplane_st = to_virtio_gpu_plane_state(new_state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	obj = new_state->fb->obj[0];
	if (obj->import_attach) {
		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
		if (ret)
			return ret;
	}

	if (bo->dumb || obj->import_attach) {
		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
							   vgdev->fence_drv.context,
							   0);
		if (!vgplane_st->fence)
			return -ENOMEM;
	}

	return 0;
}

static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach = obj->import_attach;
	struct dma_resv *resv = attach->dmabuf->resv;

	dma_resv_lock(resv, NULL);
	dma_buf_unpin(attach);
	dma_resv_unlock(resv);
}

static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_plane_state *vgplane_st;
	struct drm_gem_object *obj;

	if (!state->fb)
		return;

	vgplane_st = to_virtio_gpu_plane_state(state);
	if (vgplane_st->fence) {
		dma_fence_put(&vgplane_st->fence->f);
		vgplane_st->fence = NULL;
	}

	obj = state->fb->obj[0];
	if (obj->import_attach)
		virtio_gpu_cleanup_imported_obj(obj);
}

static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									    plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		vgplane_st = to_virtio_gpu_plane_state(plane->state);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgplane_st->fence->f, true);
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->hotspot_x,
			  plane->state->hotspot_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->hotspot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->hotspot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
					 struct drm_scanout_buffer *sb)
{
	struct virtio_gpu_object *bo;

	if (!plane->state || !plane->state->fb || !plane->state->visible)
		return -ENODEV;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);

	/* Only support mapped shmem bo */
	if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
		return -ENODEV;

	iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);

	sb->format = plane->state->fb->format;
	sb->height = plane->state->fb->height;
	sb->width = plane->state->fb->width;
	sb->pitch[0] = plane->state->fb->pitches[0];
	return 0;
}

static void virtio_panic_flush(struct drm_plane *plane)
{
	struct virtio_gpu_object *bo;
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_rect rect;

	rect.x1 = 0;
	rect.y1 = 0;
	rect.x2 = plane->state->fb->width;
	rect.y2 = plane->state->fb->height;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);

	if (bo->dumb) {
		if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
						    &rect))
			return;
	}

	virtio_gpu_panic_resource_flush(plane,
					plane->state->src_x >> 16,
					plane->state->src_y >> 16,
					plane->state->src_w >> 16,
					plane->state->src_h >> 16);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_primary_plane_update,
	.get_scanout_buffer = virtio_drm_get_scanout_buffer,
	.panic_flush = virtio_panic_flush,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);

	if (type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_enable_fb_damage_clips(plane);

	return plane;
}