/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

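/*
 * Each plane type advertises a single 32-bit format; the DRM_FORMAT_HOST_*
 * macros (defined in virtgpu_drv.h) select the variant matching the host
 * byte order.
 */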
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

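/* Map a DRM fourcc to the corresponding VIRTIO_GPU_FORMAT_* value. */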
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

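/*
 * Duplicate the plane state, wrapping the core state in a
 * virtio_gpu_plane_state so a per-update fence can be tracked.
 */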
static struct
drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
	struct virtio_gpu_plane_state *new;

	if (WARN_ON(!plane->state))
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);

	return &new->base;
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

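/*
 * Validate a plane update: scaling is not supported, and a framebuffer
 * change forces a full-plane damage upload.
 */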
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										  plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	/*
	 * Ignore damage clips if the framebuffer attached to the plane's state
	 * has changed since the last plane update (page-flip). In this case, a
	 * full plane update should happen because uploads are done per-buffer.
	 */
	if (old_plane_state->fb != new_plane_state->fb)
		new_plane_state->ignore_damage_clips = true;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}

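/*
 * Upload the damaged rectangle of a dumb (guest-backed) buffer to the host
 * with a TRANSFER_TO_HOST_2D command.
 */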
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

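/*
 * Ask the host to flush the given region of the plane's resource to the
 * scanout. If prepare_fb allocated a fence, issue the flush with it and
 * wait for completion with a 50 ms timeout.
 */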
static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	vgplane_st = to_virtio_gpu_plane_state(plane->state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgplane_st->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs,
					      vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
				       msecs_to_jiffies(50));
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

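/*
 * Commit a primary plane update: disable the scanout when there is no
 * framebuffer, upload dumb buffers, (re)program the scanout when the
 * framebuffer or source rectangle changed, then flush the damaged region.
 */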
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
				(vgdev, output->index, bo,
				 plane->state->fb,
				 plane->state->src_w >> 16,
				 plane->state->src_h >> 16,
				 plane->state->src_x >> 16,
				 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}

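/*
 * Pin an imported dma-buf and, on first use, build its sg table and attach
 * the backing memory to the virtio-gpu resource.
 */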
static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
					   struct drm_plane_state *new_state,
					   struct drm_gem_object *obj)
{
	struct virtio_gpu_device *vgdev = plane->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct dma_buf_attachment *attach = obj->import_attach;
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret) {
		dma_resv_unlock(resv);
		return ret;
	}

	if (!bo->sgt) {
		ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
						 bo, attach);
		if (ret)
			goto err;

		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	dma_resv_unlock(resv);
	return 0;

err:
	dma_buf_unpin(attach);
	dma_resv_unlock(resv);
	return ret;
}

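/*
 * Prepare a framebuffer for display: hook up implicit fencing, pin imported
 * dma-bufs, and allocate a fence for buffers that need a host transfer.
 */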
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	vgplane_st = to_virtio_gpu_plane_state(new_state);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	obj = new_state->fb->obj[0];
	if (obj->import_attach) {
		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
		if (ret)
			return ret;
	}

	if (bo->dumb || obj->import_attach) {
		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
							   vgdev->fence_drv.context,
							   0);
		if (!vgplane_st->fence)
			return -ENOMEM;
	}

	return 0;
}

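/* Unpin a previously pinned imported dma-buf. */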
static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach = obj->import_attach;
	struct dma_resv *resv = attach->dmabuf->resv;

	dma_resv_lock(resv, NULL);
	dma_buf_unpin(attach);
	dma_resv_unlock(resv);
}

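/* Undo prepare_fb: drop the per-update fence and unpin imported dma-bufs. */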
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_plane_state *vgplane_st;
	struct drm_gem_object *obj;

	if (!state->fb)
		return;

	vgplane_st = to_virtio_gpu_plane_state(state);
	if (vgplane_st->fence) {
		dma_fence_put(&vgplane_st->fence->f);
		vgplane_st->fence = NULL;
	}

	obj = state->fb->obj[0];
	if (obj->import_attach)
		virtio_gpu_cleanup_imported_obj(obj);
}

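/*
 * Commit a cursor plane update: upload a new cursor image for dumb buffers
 * (waiting for the transfer to complete), then send an UPDATE_CURSOR or
 * MOVE_CURSOR command depending on whether the framebuffer changed.
 */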
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_plane_state *vgplane_st;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		vgplane_st = to_virtio_gpu_plane_state(plane->state);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgplane_st->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgplane_st->fence->f, true);
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->hotspot_x,
			  plane->state->hotspot_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->hotspot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->hotspot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_cursor_plane_update,
};

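/*
 * Create a primary or cursor plane for the given scanout index and register
 * the matching helper vtable.
 */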
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);

	if (type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_enable_fb_damage_clips(plane);

	return plane;
}