/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

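/*
 * Validate a proposed plane state: damage clips are ignored when the
 * framebuffer changed, since a page-flip implies a full upload anyway.
 * The helper check rejects any scaling; a cursor plane may be positioned
 * freely within the CRTC, while the primary plane must cover it, and
 * updates on a disabled CRTC are allowed.
 */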
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										 plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_plane_state->fb != new_plane_state->fb)
		new_plane_state->ignore_damage_clips = true;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}

static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgfb->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);

		dma_fence_wait_timeout(&vgfb->fence->f, true,
				       msecs_to_jiffies(50));
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

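/*
 * Atomic update for the primary plane: disable the scanout when no
 * framebuffer is bound or the CRTC is inactive, upload the merged damage
 * rectangle for dumb BOs, (re)program the scanout whenever the
 * framebuffer or source rectangle changed or a modeset is pending, and
 * finally flush the damaged region to the host.
 */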
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}

static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	if (bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
						     0);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

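/*
 * Atomic update for the cursor plane: a new cursor image is first
 * transferred to the host and waited upon synchronously, using the fence
 * allocated in prepare_fb; then either an UPDATE_CURSOR or a MOVE_CURSOR
 * command is sent, depending on whether the framebuffer changed or only
 * the position did.
 */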
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->hotspot_x,
			  plane->state->hotspot_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->hotspot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->hotspot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);

	if (type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_enable_fb_damage_clips(plane);

	return plane;
}