1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3 *
4 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28 #include "vmwgfx_kms.h"
29
30 #include "vmwgfx_bo.h"
31 #include "vmwgfx_vkms.h"
32 #include "vmw_surface_cache.h"
33
34 #include <drm/drm_atomic.h>
35 #include <drm/drm_atomic_helper.h>
36 #include <drm/drm_damage_helper.h>
37 #include <drm/drm_fourcc.h>
38 #include <drm/drm_rect.h>
39 #include <drm/drm_sysfs.h>
40 #include <drm/drm_edid.h>
41
42 void vmw_du_init(struct vmw_display_unit *du)
43 {
44 vmw_vkms_crtc_init(&du->crtc);
45 }
46
47 void vmw_du_cleanup(struct vmw_display_unit *du)
48 {
49 struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
50
51 vmw_vkms_crtc_cleanup(&du->crtc);
52 drm_plane_cleanup(&du->primary);
53 if (vmw_cmd_supported(dev_priv))
54 drm_plane_cleanup(&du->cursor.base);
55
56 drm_connector_unregister(&du->connector);
57 drm_crtc_cleanup(&du->crtc);
58 drm_encoder_cleanup(&du->encoder);
59 drm_connector_cleanup(&du->connector);
60 }
61
62 /*
63 * Display Unit Cursor functions
64 */
65
66 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
67 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
68 struct vmw_plane_state *vps,
69 u32 *image, u32 width, u32 height,
70 u32 hotspotX, u32 hotspotY);
71
72 struct vmw_svga_fifo_cmd_define_cursor {
73 u32 cmd;
74 SVGAFifoCmdDefineAlphaCursor cursor;
75 };
76
77 /**
78 * vmw_send_define_cursor_cmd - queue a define cursor command
79 * @dev_priv: the private driver struct
80 * @image: buffer which holds the cursor image
81 * @width: width of the mouse cursor image
82 * @height: height of the mouse cursor image
83 * @hotspotX: the horizontal position of mouse hotspot
84 * @hotspotY: the vertical position of mouse hotspot
85 */
86 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
87 u32 *image, u32 width, u32 height,
88 u32 hotspotX, u32 hotspotY)
89 {
90 struct vmw_svga_fifo_cmd_define_cursor *cmd;
91 const u32 image_size = width * height * sizeof(*image);
92 const u32 cmd_size = sizeof(*cmd) + image_size;
93
94 /* Try to reserve fifocmd space and swallow any failures;
95 such reservations cannot be left unconsumed for long
96 under the risk of clogging other fifocmd users, so
97 we treat reservations separately from the way we treat
98 other fallible KMS-atomic resources at prepare_fb */
99 cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
100
101 if (unlikely(!cmd))
102 return;
103
104 memset(cmd, 0, sizeof(*cmd));
105
106 memcpy(&cmd[1], image, image_size);
107
108 cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
109 cmd->cursor.id = 0;
110 cmd->cursor.width = width;
111 cmd->cursor.height = height;
112 cmd->cursor.hotspotX = hotspotX;
113 cmd->cursor.hotspotY = hotspotY;
114
115 vmw_cmd_commit_flush(dev_priv, cmd_size);
116 }
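/*
 * For reference, the command reserved above is laid out as the fixed-size
 * header immediately followed by the raw 32-bit cursor image, which is why
 * the pixels are copied to &cmd[1]:
 *
 *	cmd_size = sizeof(*cmd) + width * height * sizeof(u32)
 *
 *	+-----+------------------------------+----------------+
 *	| cmd | SVGAFifoCmdDefineAlphaCursor | pixel data ... |
 *	+-----+------------------------------+----------------+
 *	'--------- sizeof(*cmd) -------------'
 */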
117
118 /**
119 * vmw_cursor_update_image - update the cursor image on the provided plane
120 * @dev_priv: the private driver struct
121 * @vps: the plane state of the cursor plane
122 * @image: buffer which holds the cursor image
123 * @width: width of the mouse cursor image
124 * @height: height of the mouse cursor image
125 * @hotspotX: the horizontal position of mouse hotspot
126 * @hotspotY: the vertical position of mouse hotspot
127 */
128 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
129 struct vmw_plane_state *vps,
130 u32 *image, u32 width, u32 height,
131 u32 hotspotX, u32 hotspotY)
132 {
133 if (vps->cursor.bo)
134 vmw_cursor_update_mob(dev_priv, vps, image,
135 vps->base.crtc_w, vps->base.crtc_h,
136 hotspotX, hotspotY);
137
138 else
139 vmw_send_define_cursor_cmd(dev_priv, image, width, height,
140 hotspotX, hotspotY);
141 }
142
143
144 /**
145 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
146 *
147 * Called from inside vmw_du_cursor_plane_atomic_update to actually
148 * make the cursor-image live.
149 *
150 * @dev_priv: device to work with
151 * @vps: the plane state of the cursor plane
152 * @image: cursor source data to fill the MOB with
153 * @width: source data width
154 * @height: source data height
155 * @hotspotX: cursor hotspot x
156 * @hotspotY: cursor hotspot Y
157 */
158 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
159 struct vmw_plane_state *vps,
160 u32 *image, u32 width, u32 height,
161 u32 hotspotX, u32 hotspotY)
162 {
163 SVGAGBCursorHeader *header;
164 SVGAGBAlphaCursorHeader *alpha_header;
165 const u32 image_size = width * height * sizeof(*image);
166
167 header = vmw_bo_map_and_cache(vps->cursor.bo);
168 alpha_header = &header->header.alphaHeader;
169
170 memset(header, 0, sizeof(*header));
171
172 header->type = SVGA_ALPHA_CURSOR;
173 header->sizeInBytes = image_size;
174
175 alpha_header->hotspotX = hotspotX;
176 alpha_header->hotspotY = hotspotY;
177 alpha_header->width = width;
178 alpha_header->height = height;
179
180 memcpy(header + 1, image, image_size);
181 vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
182 vps->cursor.bo->tbo.resource->start);
183 }
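/*
 * The cursor mob written above has the same "header followed by pixels"
 * shape, only with a SVGAGBCursorHeader up front:
 *
 *	+--------------------+----------------------------+
 *	| SVGAGBCursorHeader | u32 pixels[width * height] |
 *	+--------------------+----------------------------+
 *
 * SVGA_REG_CURSOR_MOBID is then pointed at the buffer; its TTM resource
 * start is used as the mob id here.
 */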
184
185
186 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
187 {
188 return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
189 }
190
191 /**
192 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
193 * @vps: cursor plane state
194 */
195 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
196 {
197 struct vmw_surface *surf;
198
199 if (vmw_user_object_is_null(&vps->uo))
200 return NULL;
201
202 surf = vmw_user_object_surface(&vps->uo);
203 if (surf && !vmw_user_object_is_mapped(&vps->uo))
204 return surf->snooper.image;
205
206 return vmw_user_object_map(&vps->uo);
207 }
208
209 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
210 struct vmw_plane_state *new_vps)
211 {
212 void *old_image;
213 void *new_image;
214 u32 size;
215 bool changed;
216
217 if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
218 old_vps->base.crtc_h != new_vps->base.crtc_h)
219 return true;
220
221 if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
222 old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
223 return true;
224
225 size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
226
227 old_image = vmw_du_cursor_plane_acquire_image(old_vps);
228 new_image = vmw_du_cursor_plane_acquire_image(new_vps);
229
230 changed = false;
231 if (old_image && new_image && old_image != new_image)
232 changed = memcmp(old_image, new_image, size) != 0;
233
234 return changed;
235 }
236
237 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
238 {
239 if (!(*vbo))
240 return;
241
242 ttm_bo_unpin(&(*vbo)->tbo);
243 vmw_bo_unreference(vbo);
244 }
245
246 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
247 struct vmw_plane_state *vps)
248 {
249 u32 i;
250
251 if (!vps->cursor.bo)
252 return;
253
254 vmw_du_cursor_plane_unmap_cm(vps);
255
256 /* Look for a free slot to return this mob to the cache. */
257 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
258 if (!vcp->cursor_mobs[i]) {
259 vcp->cursor_mobs[i] = vps->cursor.bo;
260 vps->cursor.bo = NULL;
261 return;
262 }
263 }
264
265 /* Cache is full: See if this mob is bigger than an existing mob. */
266 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
267 if (vcp->cursor_mobs[i]->tbo.base.size <
268 vps->cursor.bo->tbo.base.size) {
269 vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
270 vcp->cursor_mobs[i] = vps->cursor.bo;
271 vps->cursor.bo = NULL;
272 return;
273 }
274 }
275
276 /* Destroy it if it's not worth caching. */
277 vmw_du_destroy_cursor_mob(&vps->cursor.bo);
278 }
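/*
 * vcp->cursor_mobs[] acts as a small size-based cache shared by the
 * put/get helpers: vmw_du_put_cursor_mob() first tries to stash the mob
 * in a free slot, then to replace a cached mob smaller than the one being
 * returned, and only destroys it as a last resort, while
 * vmw_du_get_cursor_mob() below reuses any cached mob that is already
 * large enough before allocating a new one.
 */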
279
280 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
281 struct vmw_plane_state *vps)
282 {
283 struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
284 u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
285 u32 i;
286 u32 cursor_max_dim, mob_max_size;
287 struct vmw_fence_obj *fence = NULL;
288 int ret;
289
290 if (!dev_priv->has_mob ||
291 (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
292 return -EINVAL;
293
294 mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
295 cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
296
297 if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
298 vps->base.crtc_h > cursor_max_dim)
299 return -EINVAL;
300
301 if (vps->cursor.bo) {
302 if (vps->cursor.bo->tbo.base.size >= size)
303 return 0;
304 vmw_du_put_cursor_mob(vcp, vps);
305 }
306
307 /* Look for an unused mob in the cache. */
308 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
309 if (vcp->cursor_mobs[i] &&
310 vcp->cursor_mobs[i]->tbo.base.size >= size) {
311 vps->cursor.bo = vcp->cursor_mobs[i];
312 vcp->cursor_mobs[i] = NULL;
313 return 0;
314 }
315 }
316 /* Create a new mob if we can't find an existing one. */
317 ret = vmw_bo_create_and_populate(dev_priv, size,
318 VMW_BO_DOMAIN_MOB,
319 &vps->cursor.bo);
320
321 if (ret != 0)
322 return ret;
323
324 /* Fence the mob creation so we are guaranteed to have the mob */
325 ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
326 if (ret != 0)
327 goto teardown;
328
329 ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
330 if (ret != 0) {
331 ttm_bo_unreserve(&vps->cursor.bo->tbo);
332 goto teardown;
333 }
334
335 dma_fence_wait(&fence->base, false);
336 dma_fence_put(&fence->base);
337
338 ttm_bo_unreserve(&vps->cursor.bo->tbo);
339 return 0;
340
341 teardown:
342 vmw_du_destroy_cursor_mob(&vps->cursor.bo);
343 return ret;
344 }
345
346
347 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
348 bool show, int x, int y)
349 {
350 const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
351 : SVGA_CURSOR_ON_HIDE;
352 uint32_t count;
353
354 spin_lock(&dev_priv->cursor_lock);
355 if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
356 vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
357 vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
358 vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
359 vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
360 vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
361 } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
362 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
363 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
364 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
365 count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
366 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
367 } else {
368 vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
369 vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
370 vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
371 }
372 spin_unlock(&dev_priv->cursor_lock);
373 }
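/*
 * Three mechanisms exist for programming the cursor position, tried from
 * newest to oldest above:
 *  - SVGA_CAP2_EXTRA_REGS: write the CURSOR4_* registers and latch them
 *    with SVGA_REG_CURSOR4_SUBMIT,
 *  - cursor bypass 3: write the SVGA_FIFO_CURSOR_* fields and bump
 *    SVGA_FIFO_CURSOR_COUNT so the host notices the update,
 *  - otherwise the legacy SVGA_REG_CURSOR_* registers.
 */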
374
375 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
376 struct ttm_object_file *tfile,
377 struct ttm_buffer_object *bo,
378 SVGA3dCmdHeader *header)
379 {
380 struct ttm_bo_kmap_obj map;
381 unsigned long kmap_offset;
382 unsigned long kmap_num;
383 SVGA3dCopyBox *box;
384 unsigned box_count;
385 void *virtual;
386 bool is_iomem;
387 struct vmw_dma_cmd {
388 SVGA3dCmdHeader header;
389 SVGA3dCmdSurfaceDMA dma;
390 } *cmd;
391 int i, ret;
392 const struct SVGA3dSurfaceDesc *desc =
393 vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
394 const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
395
396 cmd = container_of(header, struct vmw_dma_cmd, header);
397
398 /* No snooper installed, nothing to copy */
399 if (!srf->snooper.image)
400 return;
401
402 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
403 DRM_ERROR("face and mipmap for cursors should never != 0\n");
404 return;
405 }
406
407 if (cmd->header.size < 64) {
408 DRM_ERROR("at least one full copy box must be given\n");
409 return;
410 }
411
412 box = (SVGA3dCopyBox *)&cmd[1];
413 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
414 sizeof(SVGA3dCopyBox);
415
416 if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
417 box->x != 0 || box->y != 0 || box->z != 0 ||
418 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
419 box->d != 1 || box_count != 1 ||
420 box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
421 /* TODO handle non-page-aligned offsets */
422 /* TODO handle dst & src != 0 */
423 /* TODO handle more than one copy */
424 DRM_ERROR("Can't snoop dma request for cursor!\n");
425 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
426 box->srcx, box->srcy, box->srcz,
427 box->x, box->y, box->z,
428 box->w, box->h, box->d, box_count,
429 cmd->dma.guest.ptr.offset);
430 return;
431 }
432
433 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
434 kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
435
436 ret = ttm_bo_reserve(bo, true, false, NULL);
437 if (unlikely(ret != 0)) {
438 DRM_ERROR("reserve failed\n");
439 return;
440 }
441
442 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
443 if (unlikely(ret != 0))
444 goto err_unreserve;
445
446 virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
447
448 if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
449 memcpy(srf->snooper.image, virtual,
450 VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
451 } else {
452 /* Image is unsigned pointer. */
453 for (i = 0; i < box->h; i++)
454 memcpy(srf->snooper.image + i * image_pitch,
455 virtual + i * cmd->dma.guest.pitch,
456 box->w * desc->pitchBytesPerBlock);
457 }
458
459 srf->snooper.age++;
460
461 ttm_bo_kunmap(&map);
462 err_unreserve:
463 ttm_bo_unreserve(bo);
464 }
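/*
 * Cursor snooping keeps a copy of the cursor image on the kernel side:
 * when user-space issues a surface DMA into a cursor surface, the single
 * page-aligned copy box handled above is duplicated into
 * srf->snooper.image and snooper.age is bumped, so that
 * vmw_kms_cursor_post_execbuf() below can redefine the host cursor from
 * the snooped image after command submission.
 */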
465
466 /**
467 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
468 *
469 * @dev_priv: Pointer to the device private struct.
470 *
471 * Clears all legacy hotspots.
472 */
473 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
474 {
475 struct drm_device *dev = &dev_priv->drm;
476 struct vmw_display_unit *du;
477 struct drm_crtc *crtc;
478
479 drm_modeset_lock_all(dev);
480 drm_for_each_crtc(crtc, dev) {
481 du = vmw_crtc_to_du(crtc);
482
483 du->hotspot_x = 0;
484 du->hotspot_y = 0;
485 }
486 drm_modeset_unlock_all(dev);
487 }
488
489 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
490 {
491 struct drm_device *dev = &dev_priv->drm;
492 struct vmw_display_unit *du;
493 struct drm_crtc *crtc;
494
495 mutex_lock(&dev->mode_config.mutex);
496
497 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
498 du = vmw_crtc_to_du(crtc);
499 if (!du->cursor_surface ||
500 du->cursor_age == du->cursor_surface->snooper.age ||
501 !du->cursor_surface->snooper.image)
502 continue;
503
504 du->cursor_age = du->cursor_surface->snooper.age;
505 vmw_send_define_cursor_cmd(dev_priv,
506 du->cursor_surface->snooper.image,
507 VMW_CURSOR_SNOOP_WIDTH,
508 VMW_CURSOR_SNOOP_HEIGHT,
509 du->hotspot_x + du->core_hotspot_x,
510 du->hotspot_y + du->core_hotspot_y);
511 }
512
513 mutex_unlock(&dev->mode_config.mutex);
514 }
515
516
517 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
518 {
519 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
520 u32 i;
521
522 vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
523
524 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
525 vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
526
527 drm_plane_cleanup(plane);
528 }
529
530
531 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
532 {
533 drm_plane_cleanup(plane);
534
535 /* Planes are static in our case so we don't free them */
536 }
537
538
539 /**
540 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
541 *
542 * @vps: plane state associated with the display surface
543 */
544 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
545 {
546 struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
547
548 if (surf) {
549 if (vps->pinned) {
550 vmw_resource_unpin(&surf->res);
551 vps->pinned--;
552 }
553 }
554 }
555
556
557 /**
558 * vmw_du_plane_cleanup_fb - Unpins the plane surface
559 *
560 * @plane: display plane
561 * @old_state: Contains the FB to clean up
562 *
563 * Unpins the framebuffer surface.
566 */
567 void
568 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
569 struct drm_plane_state *old_state)
570 {
571 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
572
573 vmw_du_plane_unpin_surf(vps);
574 }
575
576
577 /**
578 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
579 *
580 * @vps: plane_state
581 *
582 * Returns 0 on success
583 */
584
585 static int
586 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
587 {
588 int ret;
589 u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
590 struct ttm_buffer_object *bo;
591
592 if (!vps->cursor.bo)
593 return -EINVAL;
594
595 bo = &vps->cursor.bo->tbo;
596
597 if (bo->base.size < size)
598 return -EINVAL;
599
600 if (vps->cursor.bo->map.virtual)
601 return 0;
602
603 ret = ttm_bo_reserve(bo, false, false, NULL);
604 if (unlikely(ret != 0))
605 return -ENOMEM;
606
607 vmw_bo_map_and_cache(vps->cursor.bo);
608
609 ttm_bo_unreserve(bo);
610
611 if (unlikely(ret != 0))
612 return -ENOMEM;
613
614 return 0;
615 }
616
617
618 /**
619 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
620 *
621 * @vps: state of the cursor plane
622 *
623 * Returns 0 on success
624 */
625
626 static int
627 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
628 {
629 int ret = 0;
630 struct vmw_bo *vbo = vps->cursor.bo;
631
632 if (!vbo || !vbo->map.virtual)
633 return 0;
634
635 ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
636 if (likely(ret == 0)) {
637 vmw_bo_unmap(vbo);
638 ttm_bo_unreserve(&vbo->tbo);
639 }
640
641 return ret;
642 }
643
644
645 /**
646 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
647 *
648 * @plane: cursor plane
649 * @old_state: contains the state to clean up
650 *
651 * Unmaps all cursor bo mappings and unpins the cursor surface.
654 */
655 void
656 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
657 struct drm_plane_state *old_state)
658 {
659 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
660 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
661
662 if (!vmw_user_object_is_null(&vps->uo))
663 vmw_user_object_unmap(&vps->uo);
664
665 vmw_du_cursor_plane_unmap_cm(vps);
666 vmw_du_put_cursor_mob(vcp, vps);
667
668 vmw_du_plane_unpin_surf(vps);
669 vmw_user_object_unref(&vps->uo);
670 }
671
672
673 /**
674 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
675 *
676 * @plane: display plane
677 * @new_state: info on the new plane state, including the FB
678 *
679 * Returns 0 on success
680 */
681 int
682 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
683 struct drm_plane_state *new_state)
684 {
685 struct drm_framebuffer *fb = new_state->fb;
686 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
687 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
688 struct vmw_bo *bo = NULL;
689 int ret = 0;
690
691 if (!vmw_user_object_is_null(&vps->uo)) {
692 vmw_user_object_unmap(&vps->uo);
693 vmw_user_object_unref(&vps->uo);
694 }
695
696 if (fb) {
697 if (vmw_framebuffer_to_vfb(fb)->bo) {
698 vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
699 vps->uo.surface = NULL;
700 } else {
701 memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
702 }
703 vmw_user_object_ref(&vps->uo);
704 }
705
706 bo = vmw_user_object_buffer(&vps->uo);
707 if (bo) {
708 struct ttm_operation_ctx ctx = {false, false};
709
710 ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
711 if (ret != 0)
712 return -ENOMEM;
713
714 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
715 if (ret != 0)
716 return -ENOMEM;
717
718 vmw_bo_pin_reserved(bo, true);
719 if (vmw_framebuffer_to_vfb(fb)->bo) {
720 const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
721
722 (void)vmw_bo_map_and_cache_size(bo, size);
723 } else {
724 vmw_bo_map_and_cache(bo);
725 }
726 ttm_bo_unreserve(&bo->tbo);
727 }
728
729 if (!vmw_user_object_is_null(&vps->uo)) {
730 vmw_du_get_cursor_mob(vcp, vps);
731 vmw_du_cursor_plane_map_cm(vps);
732 }
733
734 return 0;
735 }
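/*
 * In short, prepare_fb for the cursor plane drops any mapping and
 * reference held over from a previous state, takes a reference on the new
 * framebuffer's user object, pins and maps its backing buffer, and finally
 * grabs and maps a cursor mob large enough for the crtc_w x crtc_h image.
 * Failures in the mob path are tolerated: with no vps->cursor.bo the
 * atomic update falls back to the legacy define-cursor FIFO command (see
 * vmw_cursor_update_image()).
 */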
736
737
738 void
739 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
740 struct drm_atomic_state *state)
741 {
742 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
743 plane);
744 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
745 plane);
746 struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
747 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
748 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
749 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
750 struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
751 struct vmw_bo *old_bo = NULL;
752 struct vmw_bo *new_bo = NULL;
753 s32 hotspot_x, hotspot_y;
754 int ret;
755
756 hotspot_x = du->hotspot_x + new_state->hotspot_x;
757 hotspot_y = du->hotspot_y + new_state->hotspot_y;
758
759 du->cursor_surface = vmw_user_object_surface(&vps->uo);
760
761 if (vmw_user_object_is_null(&vps->uo)) {
762 vmw_cursor_update_position(dev_priv, false, 0, 0);
763 return;
764 }
765
766 vps->cursor.hotspot_x = hotspot_x;
767 vps->cursor.hotspot_y = hotspot_y;
768
769 if (du->cursor_surface)
770 du->cursor_age = du->cursor_surface->snooper.age;
771
772 if (!vmw_user_object_is_null(&old_vps->uo)) {
773 old_bo = vmw_user_object_buffer(&old_vps->uo);
774 ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
775 if (ret != 0)
776 return;
777 }
778
779 if (!vmw_user_object_is_null(&vps->uo)) {
780 new_bo = vmw_user_object_buffer(&vps->uo);
781 if (old_bo != new_bo) {
782 ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
783 if (ret != 0)
784 return;
785 } else {
786 new_bo = NULL;
787 }
788 }
789 if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
790 /*
791 * If it hasn't changed, avoid making the device do extra
792 * work by keeping the old cursor active.
793 */
794 struct vmw_cursor_plane_state tmp = old_vps->cursor;
795 old_vps->cursor = vps->cursor;
796 vps->cursor = tmp;
797 } else {
798 void *image = vmw_du_cursor_plane_acquire_image(vps);
799 if (image)
800 vmw_cursor_update_image(dev_priv, vps, image,
801 new_state->crtc_w,
802 new_state->crtc_h,
803 hotspot_x, hotspot_y);
804 }
805
806 if (old_bo)
807 ttm_bo_unreserve(&old_bo->tbo);
808 if (new_bo)
809 ttm_bo_unreserve(&new_bo->tbo);
810
811 du->cursor_x = new_state->crtc_x + du->set_gui_x;
812 du->cursor_y = new_state->crtc_y + du->set_gui_y;
813
814 vmw_cursor_update_position(dev_priv, true,
815 du->cursor_x + hotspot_x,
816 du->cursor_y + hotspot_y);
817
818 du->core_hotspot_x = hotspot_x - du->hotspot_x;
819 du->core_hotspot_y = hotspot_y - du->hotspot_y;
820 }
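/*
 * The swap in the unchanged case above is deliberate: the new plane state
 * steals the cursor mob that already holds the live image, while the old
 * state inherits the freshly prepared (but unused) mob, which cleanup_fb
 * then returns to the mob cache. An identical cursor therefore never
 * causes the image to be re-uploaded; only the position is reprogrammed.
 */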
821
822
823 /**
824 * vmw_du_primary_plane_atomic_check - check if the new state is okay
825 *
826 * @plane: display plane
827 * @state: info on the new plane state, including the FB
828 *
829 * Check if the new state is settable given the current state. Other
830 * than what the atomic helper checks, we care about crtc fitting
831 * the FB and maintaining one active framebuffer.
832 *
833 * Returns 0 on success
834 */
835 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
836 struct drm_atomic_state *state)
837 {
838 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
839 plane);
840 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
841 plane);
842 struct drm_crtc_state *crtc_state = NULL;
843 struct drm_framebuffer *new_fb = new_state->fb;
844 struct drm_framebuffer *old_fb = old_state->fb;
845 int ret;
846
847 /*
848 * Ignore damage clips if the framebuffer attached to the plane's state
849 * has changed since the last plane update (page-flip). In this case, a
850 * full plane update should happen because uploads are done per-buffer.
851 */
852 if (old_fb != new_fb)
853 new_state->ignore_damage_clips = true;
854
855 if (new_state->crtc)
856 crtc_state = drm_atomic_get_new_crtc_state(state,
857 new_state->crtc);
858
859 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
860 DRM_PLANE_NO_SCALING,
861 DRM_PLANE_NO_SCALING,
862 false, true);
863 return ret;
864 }
865
866
867 /**
868 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
869 *
870 * @plane: cursor plane
871 * @state: info on the new plane state
872 *
873 * This is a chance to fail if the new cursor state does not fit
874 * our requirements.
875 *
876 * Returns 0 on success
877 */
878 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
879 struct drm_atomic_state *state)
880 {
881 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
882 plane);
883 int ret = 0;
884 struct drm_crtc_state *crtc_state = NULL;
885 struct vmw_surface *surface = NULL;
886 struct drm_framebuffer *fb = new_state->fb;
887
888 if (new_state->crtc)
889 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
890 new_state->crtc);
891
892 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
893 DRM_PLANE_NO_SCALING,
894 DRM_PLANE_NO_SCALING,
895 true, true);
896 if (ret)
897 return ret;
898
899 /* Turning off */
900 if (!fb)
901 return 0;
902
903 /* A lot of the code assumes this */
904 if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
905 DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
906 new_state->crtc_w, new_state->crtc_h);
907 return -EINVAL;
908 }
909
910 if (!vmw_framebuffer_to_vfb(fb)->bo) {
911 surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
912
913 WARN_ON(!surface);
914
915 if (!surface ||
916 (!surface->snooper.image && !surface->res.guest_memory_bo)) {
917 DRM_ERROR("surface not suitable for cursor\n");
918 return -EINVAL;
919 }
920 }
921
922 return 0;
923 }
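/*
 * Note the hard requirements enforced above: the cursor must be exactly
 * 64x64, and a surface-backed cursor needs either a snooped image or
 * guest memory backing to source the pixels from.
 */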
924
925
926 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
927 struct drm_atomic_state *state)
928 {
929 struct vmw_private *vmw = vmw_priv(crtc->dev);
930 struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
931 crtc);
932 struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
933 int connector_mask = drm_connector_mask(&du->connector);
934 bool has_primary = new_state->plane_mask &
935 drm_plane_mask(crtc->primary);
936
937 /*
938 * This is fine in general, but broken userspace might expect
939 * some actual rendering so give a clue as to why it's blank.
940 */
941 if (new_state->enable && !has_primary)
942 drm_dbg_driver(&vmw->drm,
943 "CRTC without a primary plane will be blank.\n");
944
945
946 if (new_state->connector_mask != connector_mask &&
947 new_state->connector_mask != 0) {
948 DRM_ERROR("Invalid connectors configuration\n");
949 return -EINVAL;
950 }
951
952 /*
953 * Our virtual device does not have a dot clock, so use the logical
954 * clock value as the dot clock.
955 */
956 if (new_state->mode.crtc_clock == 0)
957 new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
958
959 return 0;
960 }
961
962
963 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
964 struct drm_atomic_state *state)
965 {
966 vmw_vkms_crtc_atomic_begin(crtc, state);
967 }
968
969 /**
970 * vmw_du_crtc_duplicate_state - duplicate crtc state
971 * @crtc: DRM crtc
972 *
973 * Allocates and returns a copy of the crtc state (both common and
974 * vmw-specific) for the specified crtc.
975 *
976 * Returns: The newly allocated crtc state, or NULL on failure.
977 */
978 struct drm_crtc_state *
979 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
980 {
981 struct drm_crtc_state *state;
982 struct vmw_crtc_state *vcs;
983
984 if (WARN_ON(!crtc->state))
985 return NULL;
986
987 vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
988
989 if (!vcs)
990 return NULL;
991
992 state = &vcs->base;
993
994 __drm_atomic_helper_crtc_duplicate_state(crtc, state);
995
996 return state;
997 }
998
999
1000 /**
1001 * vmw_du_crtc_reset - creates a blank vmw crtc state
1002 * @crtc: DRM crtc
1003 *
1004 * Resets the atomic state for @crtc by freeing the state pointer (which
1005 * might be NULL, e.g. at driver load time) and allocating a new empty state
1006 * object.
1007 */
1008 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1009 {
1010 struct vmw_crtc_state *vcs;
1011
1012
1013 if (crtc->state) {
1014 __drm_atomic_helper_crtc_destroy_state(crtc->state);
1015
1016 kfree(vmw_crtc_state_to_vcs(crtc->state));
1017 }
1018
1019 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1020
1021 if (!vcs) {
1022 DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1023 return;
1024 }
1025
1026 __drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1027 }
1028
1029
1030 /**
1031 * vmw_du_crtc_destroy_state - destroy crtc state
1032 * @crtc: DRM crtc
1033 * @state: state object to destroy
1034 *
1035 * Destroys the crtc state (both common and vmw-specific) for the
1036 * specified crtc.
1037 */
1038 void
1039 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1040 struct drm_crtc_state *state)
1041 {
1042 drm_atomic_helper_crtc_destroy_state(crtc, state);
1043 }
1044
1045
1046 /**
1047 * vmw_du_plane_duplicate_state - duplicate plane state
1048 * @plane: drm plane
1049 *
1050 * Allocates and returns a copy of the plane state (both common and
1051 * vmw-specific) for the specified plane.
1052 *
1053 * Returns: The newly allocated plane state, or NULL on failure.
1054 */
1055 struct drm_plane_state *
1056 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1057 {
1058 struct drm_plane_state *state;
1059 struct vmw_plane_state *vps;
1060
1061 vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1062
1063 if (!vps)
1064 return NULL;
1065
1066 vps->pinned = 0;
1067 vps->cpp = 0;
1068
1069 memset(&vps->cursor, 0, sizeof(vps->cursor));
1070
1071 /* Each ref counted resource needs to be acquired again */
1072 vmw_user_object_ref(&vps->uo);
1073 state = &vps->base;
1074
1075 __drm_atomic_helper_plane_duplicate_state(plane, state);
1076
1077 return state;
1078 }
1079
1080
1081 /**
1082 * vmw_du_plane_reset - creates a blank vmw plane state
1083 * @plane: drm plane
1084 *
1085 * Resets the atomic state for @plane by freeing the state pointer (which might
1086 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1087 */
1088 void vmw_du_plane_reset(struct drm_plane *plane)
1089 {
1090 struct vmw_plane_state *vps;
1091
1092 if (plane->state)
1093 vmw_du_plane_destroy_state(plane, plane->state);
1094
1095 vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1096
1097 if (!vps) {
1098 DRM_ERROR("Cannot allocate vmw_plane_state\n");
1099 return;
1100 }
1101
1102 __drm_atomic_helper_plane_reset(plane, &vps->base);
1103 }
1104
1105
1106 /**
1107 * vmw_du_plane_destroy_state - destroy plane state
1108 * @plane: DRM plane
1109 * @state: state object to destroy
1110 *
1111 * Destroys the plane state (both common and vmw-specific) for the
1112 * specified plane.
1113 */
1114 void
1115 vmw_du_plane_destroy_state(struct drm_plane *plane,
1116 struct drm_plane_state *state)
1117 {
1118 struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1119
1120 /* Should have been freed by cleanup_fb */
1121 vmw_user_object_unref(&vps->uo);
1122
1123 drm_atomic_helper_plane_destroy_state(plane, state);
1124 }
1125
1126
1127 /**
1128 * vmw_du_connector_duplicate_state - duplicate connector state
1129 * @connector: DRM connector
1130 *
1131 * Allocates and returns a copy of the connector state (both common and
1132 * vmw-specific) for the specified connector.
1133 *
1134 * Returns: The newly allocated connector state, or NULL on failure.
1135 */
1136 struct drm_connector_state *
1137 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1138 {
1139 struct drm_connector_state *state;
1140 struct vmw_connector_state *vcs;
1141
1142 if (WARN_ON(!connector->state))
1143 return NULL;
1144
1145 vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1146
1147 if (!vcs)
1148 return NULL;
1149
1150 state = &vcs->base;
1151
1152 __drm_atomic_helper_connector_duplicate_state(connector, state);
1153
1154 return state;
1155 }
1156
1157
1158 /**
1159 * vmw_du_connector_reset - creates a blank vmw connector state
1160 * @connector: DRM connector
1161 *
1162 * Resets the atomic state for @connector by freeing the state pointer (which
1163 * might be NULL, e.g. at driver load time) and allocating a new empty state
1164 * object.
1165 */
1166 void vmw_du_connector_reset(struct drm_connector *connector)
1167 {
1168 struct vmw_connector_state *vcs;
1169
1170
1171 if (connector->state) {
1172 __drm_atomic_helper_connector_destroy_state(connector->state);
1173
1174 kfree(vmw_connector_state_to_vcs(connector->state));
1175 }
1176
1177 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1178
1179 if (!vcs) {
1180 DRM_ERROR("Cannot allocate vmw_connector_state\n");
1181 return;
1182 }
1183
1184 __drm_atomic_helper_connector_reset(connector, &vcs->base);
1185 }
1186
1187
1188 /**
1189 * vmw_du_connector_destroy_state - destroy connector state
1190 * @connector: DRM connector
1191 * @state: state object to destroy
1192 *
1193 * Destroys the connector state (both common and vmw-specific) for the
1194 * specified connector.
1195 */
1196 void
1197 vmw_du_connector_destroy_state(struct drm_connector *connector,
1198 struct drm_connector_state *state)
1199 {
1200 drm_atomic_helper_connector_destroy_state(connector, state);
1201 }
1202 /*
1203 * Generic framebuffer code
1204 */
1205
1206 /*
1207 * Surface framebuffer code
1208 */
1209
1210 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1211 {
1212 struct vmw_framebuffer_surface *vfbs =
1213 vmw_framebuffer_to_vfbs(framebuffer);
1214
1215 drm_framebuffer_cleanup(framebuffer);
1216 vmw_user_object_unref(&vfbs->uo);
1217
1218 kfree(vfbs);
1219 }
1220
1221 /**
1222 * vmw_kms_readback - Perform a readback from the screen system to
1223 * a buffer-object backed framebuffer.
1224 *
1225 * @dev_priv: Pointer to the device private structure.
1226 * @file_priv: Pointer to a struct drm_file identifying the caller.
1227 * Must be set to NULL if @user_fence_rep is NULL.
1228 * @vfb: Pointer to the buffer-object backed framebuffer.
1229 * @user_fence_rep: User-space provided structure for fence information.
1230 * Must be set to non-NULL if @file_priv is non-NULL.
1231 * @vclips: Array of clip rects.
1232 * @num_clips: Number of clip rects in @vclips.
1233 *
1234 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1235 * interrupted.
1236 */
1237 int vmw_kms_readback(struct vmw_private *dev_priv,
1238 struct drm_file *file_priv,
1239 struct vmw_framebuffer *vfb,
1240 struct drm_vmw_fence_rep __user *user_fence_rep,
1241 struct drm_vmw_rect *vclips,
1242 uint32_t num_clips)
1243 {
1244 switch (dev_priv->active_display_unit) {
1245 case vmw_du_screen_object:
1246 return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1247 user_fence_rep, vclips, num_clips,
1248 NULL);
1249 case vmw_du_screen_target:
1250 return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1251 user_fence_rep, NULL, vclips, num_clips,
1252 1, NULL);
1253 default:
1254 WARN_ONCE(true,
1255 "Readback called with invalid display system.\n");
1256 }
1257
1258 return -ENOSYS;
1259 }
1260
1261 static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
1262 struct drm_file *file_priv,
1263 unsigned int *handle)
1264 {
1265 struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
1266 struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
1267
1268 if (WARN_ON(!bo))
1269 return -EINVAL;
1270 return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
1271 }
1272
1273 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1274 .create_handle = vmw_framebuffer_surface_create_handle,
1275 .destroy = vmw_framebuffer_surface_destroy,
1276 .dirty = drm_atomic_helper_dirtyfb,
1277 };
1278
1279 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1280 struct vmw_user_object *uo,
1281 struct vmw_framebuffer **out,
1282 const struct drm_mode_fb_cmd2
1283 *mode_cmd)
1284
1285 {
1286 struct drm_device *dev = &dev_priv->drm;
1287 struct vmw_framebuffer_surface *vfbs;
1288 struct vmw_surface *surface;
1289 int ret;
1290
1291 /* 3D is only supported on HWv8 and newer hosts */
1292 if (dev_priv->active_display_unit == vmw_du_legacy)
1293 return -ENOSYS;
1294
1295 surface = vmw_user_object_surface(uo);
1296
1297 /*
1298 * Sanity checks.
1299 */
1300
1301 if (!drm_any_plane_has_format(&dev_priv->drm,
1302 mode_cmd->pixel_format,
1303 mode_cmd->modifier[0])) {
1304 drm_dbg(&dev_priv->drm,
1305 "unsupported pixel format %p4cc / modifier 0x%llx\n",
1306 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1307 return -EINVAL;
1308 }
1309
1310 /* Surface must be marked as a scanout. */
1311 if (unlikely(!surface->metadata.scanout))
1312 return -EINVAL;
1313
1314 if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1315 surface->metadata.num_sizes != 1 ||
1316 surface->metadata.base_size.width < mode_cmd->width ||
1317 surface->metadata.base_size.height < mode_cmd->height ||
1318 surface->metadata.base_size.depth != 1)) {
1319 DRM_ERROR("Incompatible surface dimensions "
1320 "for requested mode.\n");
1321 return -EINVAL;
1322 }
1323
1324 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1325 if (!vfbs) {
1326 ret = -ENOMEM;
1327 goto out_err1;
1328 }
1329
1330 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1331 memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
1332 vmw_user_object_ref(&vfbs->uo);
1333
1334 *out = &vfbs->base;
1335
1336 ret = drm_framebuffer_init(dev, &vfbs->base.base,
1337 &vmw_framebuffer_surface_funcs);
1338 if (ret)
1339 goto out_err2;
1340
1341 return 0;
1342
1343 out_err2:
1344 vmw_user_object_unref(&vfbs->uo);
1345 kfree(vfbs);
1346 out_err1:
1347 return ret;
1348 }
1349
1350 /*
1351 * Buffer-object framebuffer code
1352 */
1353
1354 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1355 struct drm_file *file_priv,
1356 unsigned int *handle)
1357 {
1358 struct vmw_framebuffer_bo *vfbd =
1359 vmw_framebuffer_to_vfbd(fb);
1360 return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1361 }
1362
1363 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1364 {
1365 struct vmw_framebuffer_bo *vfbd =
1366 vmw_framebuffer_to_vfbd(framebuffer);
1367
1368 drm_framebuffer_cleanup(framebuffer);
1369 vmw_bo_unreference(&vfbd->buffer);
1370
1371 kfree(vfbd);
1372 }
1373
1374 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1375 .create_handle = vmw_framebuffer_bo_create_handle,
1376 .destroy = vmw_framebuffer_bo_destroy,
1377 .dirty = drm_atomic_helper_dirtyfb,
1378 };
1379
1380 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1381 struct vmw_bo *bo,
1382 struct vmw_framebuffer **out,
1383 const struct drm_mode_fb_cmd2
1384 *mode_cmd)
1385
1386 {
1387 struct drm_device *dev = &dev_priv->drm;
1388 struct vmw_framebuffer_bo *vfbd;
1389 unsigned int requested_size;
1390 int ret;
1391
1392 requested_size = mode_cmd->height * mode_cmd->pitches[0];
1393 if (unlikely(requested_size > bo->tbo.base.size)) {
1394 DRM_ERROR("Screen buffer object size is too small "
1395 "for requested mode.\n");
1396 return -EINVAL;
1397 }
1398
1399 if (!drm_any_plane_has_format(&dev_priv->drm,
1400 mode_cmd->pixel_format,
1401 mode_cmd->modifier[0])) {
1402 drm_dbg(&dev_priv->drm,
1403 "unsupported pixel format %p4cc / modifier 0x%llx\n",
1404 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1405 return -EINVAL;
1406 }
1407
1408 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1409 if (!vfbd) {
1410 ret = -ENOMEM;
1411 goto out_err1;
1412 }
1413
1414 vfbd->base.base.obj[0] = &bo->tbo.base;
1415 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1416 vfbd->base.bo = true;
1417 vfbd->buffer = vmw_bo_reference(bo);
1418 *out = &vfbd->base;
1419
1420 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1421 &vmw_framebuffer_bo_funcs);
1422 if (ret)
1423 goto out_err2;
1424
1425 return 0;
1426
1427 out_err2:
1428 vmw_bo_unreference(&bo);
1429 kfree(vfbd);
1430 out_err1:
1431 return ret;
1432 }
1433
1434
1435 /**
1436 * vmw_kms_srf_ok - check if a surface can be created
1437 *
1438 * @dev_priv: Pointer to device private struct.
1439 * @width: requested width
1440 * @height: requested height
1441 *
1442 * Surfaces need to be less than texture size
1443 */
1444 static bool
1445 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1446 {
1447 if (width > dev_priv->texture_max_width ||
1448 height > dev_priv->texture_max_height)
1449 return false;
1450
1451 return true;
1452 }
1453
1454 /**
1455 * vmw_kms_new_framebuffer - Create a new framebuffer.
1456 *
1457 * @dev_priv: Pointer to device private struct.
1458 * @uo: Pointer to user object to wrap the kms framebuffer around.
1459 * Either the buffer or surface inside the user object must be NULL.
1460 * @mode_cmd: Frame-buffer metadata.
1461 */
1462 struct vmw_framebuffer *
1463 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1464 struct vmw_user_object *uo,
1465 const struct drm_mode_fb_cmd2 *mode_cmd)
1466 {
1467 struct vmw_framebuffer *vfb = NULL;
1468 int ret;
1469
1470 /* Create the new framebuffer depending on what we have */
1471 if (vmw_user_object_surface(uo)) {
1472 ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
1473 mode_cmd);
1474 } else if (uo->buffer) {
1475 ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
1476 mode_cmd);
1477 } else {
1478 BUG();
1479 }
1480
1481 if (ret)
1482 return ERR_PTR(ret);
1483
1484 return vfb;
1485 }
1486
1487 /*
1488 * Generic Kernel modesetting functions
1489 */
1490
1491 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1492 struct drm_file *file_priv,
1493 const struct drm_mode_fb_cmd2 *mode_cmd)
1494 {
1495 struct vmw_private *dev_priv = vmw_priv(dev);
1496 struct vmw_framebuffer *vfb = NULL;
1497 struct vmw_user_object uo = {0};
1498 int ret;
1499
1500 /* returns either a bo or surface */
1501 ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
1502 &uo);
1503 if (ret) {
1504 DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1505 mode_cmd->handles[0], mode_cmd->handles[0]);
1506 goto err_out;
1507 }
1508
1509
1510 if (vmw_user_object_surface(&uo) &&
1511 !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1512 DRM_ERROR("Surface size cannot exceed %dx%d\n",
1513 dev_priv->texture_max_width,
1514 dev_priv->texture_max_height);
1515 ret = -EINVAL;
1516 goto err_out;
1517 }
1518
1519
1520 vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
1521 if (IS_ERR(vfb)) {
1522 ret = PTR_ERR(vfb);
1523 goto err_out;
1524 }
1525
1526 err_out:
1527 /* vmw_user_object_lookup takes one ref so does new_fb */
1528 vmw_user_object_unref(&uo);
1529
1530 if (ret) {
1531 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1532 return ERR_PTR(ret);
1533 }
1534
1535 return &vfb->base;
1536 }
1537
1538 /**
1539 * vmw_kms_check_display_memory - Validates display memory required for a
1540 * topology
1541 * @dev: DRM device
1542 * @num_rects: number of drm_rect in rects
1543 * @rects: array of drm_rect representing the topology to validate indexed by
1544 * crtc index.
1545 *
1546 * Returns:
1547 * 0 on success otherwise negative error code
1548 */
1549 static int vmw_kms_check_display_memory(struct drm_device *dev,
1550 uint32_t num_rects,
1551 struct drm_rect *rects)
1552 {
1553 struct vmw_private *dev_priv = vmw_priv(dev);
1554 struct drm_rect bounding_box = {0};
1555 u64 total_pixels = 0, pixel_mem, bb_mem;
1556 int i;
1557
1558 for (i = 0; i < num_rects; i++) {
1559 /*
1560 * For STDU only individual screen (screen target) is limited by
1561 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1562 */
1563 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1564 (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1565 drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1566 VMW_DEBUG_KMS("Screen size not supported.\n");
1567 return -EINVAL;
1568 }
1569
1570 /* Bounding box upper left is at (0,0). */
1571 if (rects[i].x2 > bounding_box.x2)
1572 bounding_box.x2 = rects[i].x2;
1573
1574 if (rects[i].y2 > bounding_box.y2)
1575 bounding_box.y2 = rects[i].y2;
1576
1577 total_pixels += (u64) drm_rect_width(&rects[i]) *
1578 (u64) drm_rect_height(&rects[i]);
1579 }
1580
1581 /* Virtual svga device primary limits are always in 32-bpp. */
1582 pixel_mem = total_pixels * 4;
1583
1584 /*
1585 * For HV10 and below prim_bb_mem is the vram size. When
1586 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
1587 * is the limit on the primary bounding box.
1588 */
1589 if (pixel_mem > dev_priv->max_primary_mem) {
1590 VMW_DEBUG_KMS("Combined output size too large.\n");
1591 return -EINVAL;
1592 }
1593
1594 /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1595 if (dev_priv->active_display_unit != vmw_du_screen_target ||
1596 !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1597 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1598
1599 if (bb_mem > dev_priv->max_primary_mem) {
1600 VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1601 return -EINVAL;
1602 }
1603 }
1604
1605 return 0;
1606 }
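/*
 * A rough worked example (numbers purely illustrative): two 1920x1080
 * outputs placed side by side give
 *
 *	total_pixels = 2 * 1920 * 1080  = 4,147,200
 *	pixel_mem    = total_pixels * 4 ~ 15.8 MiB
 *	bb_mem       = 3840 * 1080 * 4  ~ 15.8 MiB
 *
 * and both values must fit within dev_priv->max_primary_mem, unless the
 * device is STDU and advertises SVGA_CAP_NO_BB_RESTRICTION, in which case
 * only pixel_mem is checked.
 */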
1607
1608 /**
1609 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1610 * crtc mutex
1611 * @state: The atomic state pointer containing the new atomic state
1612 * @crtc: The crtc
1613 *
1614 * This function returns the new crtc state if it's part of the state update.
1615 * Otherwise returns the current crtc state. It also makes sure that the
1616 * crtc mutex is locked.
1617 *
1618 * Returns: A valid crtc state pointer or NULL. It may also return a
1619 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1620 */
1621 static struct drm_crtc_state *
1622 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1623 {
1624 struct drm_crtc_state *crtc_state;
1625
1626 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1627 if (crtc_state) {
1628 lockdep_assert_held(&crtc->mutex.mutex.base);
1629 } else {
1630 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1631
1632 if (ret != 0 && ret != -EALREADY)
1633 return ERR_PTR(ret);
1634
1635 crtc_state = crtc->state;
1636 }
1637
1638 return crtc_state;
1639 }
1640
1641 /**
1642 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1643 * from the same fb after the new state is committed.
1644 * @dev: The drm_device.
1645 * @state: The new state to be checked.
1646 *
1647 * Returns:
1648 * Zero on success,
1649 * -EINVAL on invalid state,
1650 * -EDEADLK if modeset locking needs to be rerun.
1651 */
1652 static int vmw_kms_check_implicit(struct drm_device *dev,
1653 struct drm_atomic_state *state)
1654 {
1655 struct drm_framebuffer *implicit_fb = NULL;
1656 struct drm_crtc *crtc;
1657 struct drm_crtc_state *crtc_state;
1658 struct drm_plane_state *plane_state;
1659
1660 drm_for_each_crtc(crtc, dev) {
1661 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1662
1663 if (!du->is_implicit)
1664 continue;
1665
1666 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1667 if (IS_ERR(crtc_state))
1668 return PTR_ERR(crtc_state);
1669
1670 if (!crtc_state || !crtc_state->enable)
1671 continue;
1672
1673 /*
1674 * Can't move primary planes across crtcs, so this is OK.
1675 * It also means we don't need to take the plane mutex.
1676 */
1677 plane_state = du->primary.state;
1678 if (plane_state->crtc != crtc)
1679 continue;
1680
1681 if (!implicit_fb)
1682 implicit_fb = plane_state->fb;
1683 else if (implicit_fb != plane_state->fb)
1684 return -EINVAL;
1685 }
1686
1687 return 0;
1688 }
1689
1690 /**
1691 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1692 * @dev: DRM device
1693 * @state: the driver state object
1694 *
1695 * Returns:
1696 * 0 on success otherwise negative error code
1697 */
1698 static int vmw_kms_check_topology(struct drm_device *dev,
1699 struct drm_atomic_state *state)
1700 {
1701 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1702 struct drm_rect *rects;
1703 struct drm_crtc *crtc;
1704 uint32_t i;
1705 int ret = 0;
1706
1707 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1708 GFP_KERNEL);
1709 if (!rects)
1710 return -ENOMEM;
1711
1712 drm_for_each_crtc(crtc, dev) {
1713 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1714 struct drm_crtc_state *crtc_state;
1715
1716 i = drm_crtc_index(crtc);
1717
1718 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1719 if (IS_ERR(crtc_state)) {
1720 ret = PTR_ERR(crtc_state);
1721 goto clean;
1722 }
1723
1724 if (!crtc_state)
1725 continue;
1726
1727 if (crtc_state->enable) {
1728 rects[i].x1 = du->gui_x;
1729 rects[i].y1 = du->gui_y;
1730 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1731 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1732 } else {
1733 rects[i].x1 = 0;
1734 rects[i].y1 = 0;
1735 rects[i].x2 = 0;
1736 rects[i].y2 = 0;
1737 }
1738 }
1739
1740 /* Determine change to topology due to new atomic state */
1741 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1742 new_crtc_state, i) {
1743 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1744 struct drm_connector *connector;
1745 struct drm_connector_state *conn_state;
1746 struct vmw_connector_state *vmw_conn_state;
1747
1748 if (!du->pref_active && new_crtc_state->enable) {
1749 VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1750 ret = -EINVAL;
1751 goto clean;
1752 }
1753
1754 /*
1755 * For vmwgfx each crtc has only one connector attached and it
1756 * is not changed so don't really need to check the
1757 * crtc->connector_mask and iterate over it.
1758 */
1759 connector = &du->connector;
1760 conn_state = drm_atomic_get_connector_state(state, connector);
1761 if (IS_ERR(conn_state)) {
1762 ret = PTR_ERR(conn_state);
1763 goto clean;
1764 }
1765
1766 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1767 vmw_conn_state->gui_x = du->gui_x;
1768 vmw_conn_state->gui_y = du->gui_y;
1769 }
1770
1771 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1772 rects);
1773
1774 clean:
1775 kfree(rects);
1776 return ret;
1777 }
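/*
 * Summarised, the topology check builds one rect per crtc from the
 * display unit's gui_x/gui_y origin and the requested mode (an empty rect
 * when the crtc is disabled), refuses to enable a display unit whose
 * pref_active flag is not set, records the gui origin in the connector
 * state, and finally hands the rects to vmw_kms_check_display_memory()
 * for the memory limits above.
 */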
1778
1779 /**
1780 * vmw_kms_atomic_check_modeset- validate state object for modeset changes
1781 *
1782 * @dev: DRM device
1783 * @state: the driver state object
1784 *
1785 * This is a simple wrapper around drm_atomic_helper_check() for
1786 * us to assign a value to mode->crtc_clock so that
1787 * drm_calc_timestamping_constants() won't throw an error message
1788 *
1789 * Returns:
1790 * Zero for success or -errno
1791 */
1792 static int
1793 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1794 struct drm_atomic_state *state)
1795 {
1796 struct drm_crtc *crtc;
1797 struct drm_crtc_state *crtc_state;
1798 bool need_modeset = false;
1799 int i, ret;
1800
1801 ret = drm_atomic_helper_check(dev, state);
1802 if (ret)
1803 return ret;
1804
1805 ret = vmw_kms_check_implicit(dev, state);
1806 if (ret) {
1807 VMW_DEBUG_KMS("Invalid implicit state\n");
1808 return ret;
1809 }
1810
1811 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1812 if (drm_atomic_crtc_needs_modeset(crtc_state))
1813 need_modeset = true;
1814 }
1815
1816 if (need_modeset)
1817 return vmw_kms_check_topology(dev, state);
1818
1819 return ret;
1820 }
1821
1822 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1823 .fb_create = vmw_kms_fb_create,
1824 .atomic_check = vmw_kms_atomic_check_modeset,
1825 .atomic_commit = drm_atomic_helper_commit,
1826 };
1827
1828 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1829 struct drm_file *file_priv,
1830 struct vmw_framebuffer *vfb,
1831 struct vmw_surface *surface,
1832 uint32_t sid,
1833 int32_t destX, int32_t destY,
1834 struct drm_vmw_rect *clips,
1835 uint32_t num_clips)
1836 {
1837 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1838 &surface->res, destX, destY,
1839 num_clips, 1, NULL, NULL);
1840 }
1841
1842
1843 int vmw_kms_present(struct vmw_private *dev_priv,
1844 struct drm_file *file_priv,
1845 struct vmw_framebuffer *vfb,
1846 struct vmw_surface *surface,
1847 uint32_t sid,
1848 int32_t destX, int32_t destY,
1849 struct drm_vmw_rect *clips,
1850 uint32_t num_clips)
1851 {
1852 int ret;
1853
1854 switch (dev_priv->active_display_unit) {
1855 case vmw_du_screen_target:
1856 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1857 &surface->res, destX, destY,
1858 num_clips, 1, NULL, NULL);
1859 break;
1860 case vmw_du_screen_object:
1861 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1862 sid, destX, destY, clips,
1863 num_clips);
1864 break;
1865 default:
1866 WARN_ONCE(true,
1867 "Present called with invalid display system.\n");
1868 ret = -ENOSYS;
1869 break;
1870 }
1871 if (ret)
1872 return ret;
1873
1874 vmw_cmd_flush(dev_priv, false);
1875
1876 return 0;
1877 }
1878
1879 static void
1880 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1881 {
1882 if (dev_priv->hotplug_mode_update_property)
1883 return;
1884
1885 dev_priv->hotplug_mode_update_property =
1886 drm_property_create_range(&dev_priv->drm,
1887 DRM_MODE_PROP_IMMUTABLE,
1888 "hotplug_mode_update", 0, 1);
1889 }
1890
1891 static void
1892 vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
1893 {
1894 struct vmw_private *vmw = vmw_priv(old_state->dev);
1895 struct drm_crtc *crtc;
1896 struct drm_crtc_state *old_crtc_state;
1897 int i;
1898
1899 drm_atomic_helper_commit_tail(old_state);
1900
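	/*
	 * With vkms enabled, wait for any CRC generation queued for the old
	 * state to finish before the commit is considered complete.
	 */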
1901 if (vmw->vkms_enabled) {
1902 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1903 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1904 (void)old_crtc_state;
1905 flush_work(&du->vkms.crc_generator_work);
1906 }
1907 }
1908 }
1909
1910 static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
1911 .atomic_commit_tail = vmw_atomic_commit_tail,
1912 };
1913
1914 int vmw_kms_init(struct vmw_private *dev_priv)
1915 {
1916 struct drm_device *dev = &dev_priv->drm;
1917 int ret;
1918 static const char *display_unit_names[] = {
1919 "Invalid",
1920 "Legacy",
1921 "Screen Object",
1922 "Screen Target",
1923 "Invalid (max)"
1924 };
1925
1926 drm_mode_config_init(dev);
1927 dev->mode_config.funcs = &vmw_kms_funcs;
1928 dev->mode_config.min_width = 1;
1929 dev->mode_config.min_height = 1;
1930 dev->mode_config.max_width = dev_priv->texture_max_width;
1931 dev->mode_config.max_height = dev_priv->texture_max_height;
1932 dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
1933 dev->mode_config.helper_private = &vmw_mode_config_helpers;
1934
1935 drm_mode_create_suggested_offset_properties(dev);
1936 vmw_kms_create_hotplug_mode_update_property(dev_priv);
1937
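	/* Prefer Screen Target, then Screen Object, then the Legacy display unit. */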
1938 ret = vmw_kms_stdu_init_display(dev_priv);
1939 if (ret) {
1940 ret = vmw_kms_sou_init_display(dev_priv);
1941 if (ret) /* Fallback */
1942 ret = vmw_kms_ldu_init_display(dev_priv);
1943 }
1944 BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
1945 drm_info(&dev_priv->drm, "%s display unit initialized\n",
1946 display_unit_names[dev_priv->active_display_unit]);
1947
1948 return ret;
1949 }
1950
1951 int vmw_kms_close(struct vmw_private *dev_priv)
1952 {
1953 int ret = 0;
1954
1955 /*
1956 * Docs say we should take the lock before calling this function,
1957 * but since it destroys encoders and our destructor calls
1958 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
1959 */
1960 drm_mode_config_cleanup(&dev_priv->drm);
1961 if (dev_priv->active_display_unit == vmw_du_legacy)
1962 ret = vmw_kms_ldu_close_display(dev_priv);
1963
1964 return ret;
1965 }
1966
1967 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1968 struct drm_file *file_priv)
1969 {
1970 struct drm_vmw_cursor_bypass_arg *arg = data;
1971 struct vmw_display_unit *du;
1972 struct drm_crtc *crtc;
1973 int ret = 0;
1974
1975 mutex_lock(&dev->mode_config.mutex);
1976 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1977
1978 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1979 du = vmw_crtc_to_du(crtc);
1980 du->hotspot_x = arg->xhot;
1981 du->hotspot_y = arg->yhot;
1982 }
1983
1984 mutex_unlock(&dev->mode_config.mutex);
1985 return 0;
1986 }
1987
1988 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1989 if (!crtc) {
1990 ret = -ENOENT;
1991 goto out;
1992 }
1993
1994 du = vmw_crtc_to_du(crtc);
1995
1996 du->hotspot_x = arg->xhot;
1997 du->hotspot_y = arg->yhot;
1998
1999 out:
2000 mutex_unlock(&dev->mode_config.mutex);
2001
2002 return ret;
2003 }
2004
2005 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2006 unsigned width, unsigned height, unsigned pitch,
2007 unsigned bpp, unsigned depth)
2008 {
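	/* The pitch is locked either via a dedicated register or the FIFO, depending on capabilities. */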
2009 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2010 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2011 else if (vmw_fifo_have_pitchlock(vmw_priv))
2012 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2013 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2014 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2015 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2016 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2017
2018 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2019 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2020 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2021 return -EINVAL;
2022 }
2023
2024 return 0;
2025 }
2026
2027 static
2028 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2029 u64 pitch,
2030 u64 height)
2031 {
2032 return (pitch * height) < (u64)dev_priv->vram_size;
2033 }
2034
2035 /**
2036 * vmw_du_update_layout - Update the display unit with topology from resolution
2037 * plugin and generate DRM uevent
2038 * @dev_priv: device private
2039 * @num_rects: number of drm_rect in rects
2040 * @rects: topology to update
2041 */
2042 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2043 unsigned int num_rects, struct drm_rect *rects)
2044 {
2045 struct drm_device *dev = &dev_priv->drm;
2046 struct vmw_display_unit *du;
2047 struct drm_connector *con;
2048 struct drm_connector_list_iter conn_iter;
2049 struct drm_modeset_acquire_ctx ctx;
2050 struct drm_crtc *crtc;
2051 int ret;
2052
2053 /* Currently gui_x/y are protected by the crtc mutex */
2054 mutex_lock(&dev->mode_config.mutex);
2055 drm_modeset_acquire_init(&ctx, 0);
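	/* Lock every crtc; on contention (-EDEADLK) back off and retry the whole set. */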
2056 retry:
2057 drm_for_each_crtc(crtc, dev) {
2058 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2059 if (ret < 0) {
2060 if (ret == -EDEADLK) {
2061 drm_modeset_backoff(&ctx);
2062 goto retry;
2063 }
2064 goto out_fini;
2065 }
2066 }
2067
2068 drm_connector_list_iter_begin(dev, &conn_iter);
2069 drm_for_each_connector_iter(con, &conn_iter) {
2070 du = vmw_connector_to_du(con);
2071 if (num_rects > du->unit) {
2072 du->pref_width = drm_rect_width(&rects[du->unit]);
2073 du->pref_height = drm_rect_height(&rects[du->unit]);
2074 du->pref_active = true;
2075 du->gui_x = rects[du->unit].x1;
2076 du->gui_y = rects[du->unit].y1;
2077 } else {
2078 du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
2079 du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2080 du->pref_active = false;
2081 du->gui_x = 0;
2082 du->gui_y = 0;
2083 }
2084 }
2085 drm_connector_list_iter_end(&conn_iter);
2086
2087 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2088 du = vmw_connector_to_du(con);
2089 if (num_rects > du->unit) {
2090 drm_object_property_set_value
2091 (&con->base, dev->mode_config.suggested_x_property,
2092 du->gui_x);
2093 drm_object_property_set_value
2094 (&con->base, dev->mode_config.suggested_y_property,
2095 du->gui_y);
2096 } else {
2097 drm_object_property_set_value
2098 (&con->base, dev->mode_config.suggested_x_property,
2099 0);
2100 drm_object_property_set_value
2101 (&con->base, dev->mode_config.suggested_y_property,
2102 0);
2103 }
2104 con->status = vmw_du_connector_detect(con, true);
2105 }
2106 out_fini:
2107 drm_modeset_drop_locks(&ctx);
2108 drm_modeset_acquire_fini(&ctx);
2109 mutex_unlock(&dev->mode_config.mutex);
2110
2111 drm_sysfs_hotplug_event(dev);
2112
2113 return 0;
2114 }
2115
2116 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2117 u16 *r, u16 *g, u16 *b,
2118 uint32_t size,
2119 struct drm_modeset_acquire_ctx *ctx)
2120 {
2121 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2122 int i;
2123
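	/* Program the SVGA palette; the registers take 8-bit components, so keep only the high byte. */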
2124 for (i = 0; i < size; i++) {
2125 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2126 r[i], g[i], b[i]);
2127 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2128 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2129 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2130 }
2131
2132 return 0;
2133 }
2134
2135 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2136 {
2137 return 0;
2138 }
2139
2140 enum drm_connector_status
2141 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2142 {
2143 uint32_t num_displays;
2144 struct drm_device *dev = connector->dev;
2145 struct vmw_private *dev_priv = vmw_priv(dev);
2146 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2147
2148 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2149
2150 return ((vmw_connector_to_du(connector)->unit < num_displays &&
2151 du->pref_active) ?
2152 connector_status_connected : connector_status_disconnected);
2153 }
2154
2155 /**
2156 * vmw_guess_mode_timing - Provide fake timings for a
2157 * 60Hz vrefresh mode.
2158 *
2159 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2160 * members filled in.
2161 */
2162 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2163 {
2164 mode->hsync_start = mode->hdisplay + 50;
2165 mode->hsync_end = mode->hsync_start + 50;
2166 mode->htotal = mode->hsync_end + 50;
2167
2168 mode->vsync_start = mode->vdisplay + 50;
2169 mode->vsync_end = mode->vsync_start + 50;
2170 mode->vtotal = mode->vsync_end + 50;
2171
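	/* Pixel clock in kHz for roughly a 60Hz refresh: htotal * vtotal * 60 / 1000. */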
2172 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2173 }
2174
2175
2176 /**
2177 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2178 * @dev: drm device for the ioctl
2179 * @data: data pointer for the ioctl
2180 * @file_priv: drm file for the ioctl call
2181 *
2182 * Update the preferred topology of the display unit as per the ioctl request. The
2183 * topology is expressed as an array of drm_vmw_rect.
2184 * e.g.
2185 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2186 *
2187 * NOTE:
2188 * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Besides
2189 * the device limit on topology, x + w and y + h (lower right) cannot be greater
2190 * than INT_MAX. A topology beyond these limits will return an error.
2191 *
2192 * Returns:
2193 * Zero on success, negative errno on failure.
2194 */
2195 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2196 struct drm_file *file_priv)
2197 {
2198 struct vmw_private *dev_priv = vmw_priv(dev);
2199 struct drm_mode_config *mode_config = &dev->mode_config;
2200 struct drm_vmw_update_layout_arg *arg =
2201 (struct drm_vmw_update_layout_arg *)data;
2202 const void __user *user_rects;
2203 struct drm_vmw_rect *rects;
2204 struct drm_rect *drm_rects;
2205 unsigned rects_size;
2206 int ret, i;
2207
2208 if (!arg->num_outputs) {
2209 struct drm_rect def_rect = {0, 0,
2210 VMWGFX_MIN_INITIAL_WIDTH,
2211 VMWGFX_MIN_INITIAL_HEIGHT};
2212 vmw_du_update_layout(dev_priv, 1, &def_rect);
2213 return 0;
2214 } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
2215 return -E2BIG;
2216 }
2217
2218 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2219 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2220 GFP_KERNEL);
2221 if (unlikely(!rects))
2222 return -ENOMEM;
2223
2224 user_rects = (void __user *)(unsigned long)arg->rects;
2225 ret = copy_from_user(rects, user_rects, rects_size);
2226 if (unlikely(ret != 0)) {
2227 DRM_ERROR("Failed to get rects.\n");
2228 ret = -EFAULT;
2229 goto out_free;
2230 }
2231
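	/*
	 * Convert the drm_vmw_rect array to drm_rect in place; the structs are
	 * the same size, and each entry is copied to curr_rect before its slot
	 * is overwritten.
	 */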
2232 drm_rects = (struct drm_rect *)rects;
2233
2234 VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2235 for (i = 0; i < arg->num_outputs; i++) {
2236 struct drm_vmw_rect curr_rect;
2237
2238 /* Verify user-space rects for overflow, as the kernel uses drm_rect */
2239 if ((rects[i].x + rects[i].w > INT_MAX) ||
2240 (rects[i].y + rects[i].h > INT_MAX)) {
2241 ret = -ERANGE;
2242 goto out_free;
2243 }
2244
2245 curr_rect = rects[i];
2246 drm_rects[i].x1 = curr_rect.x;
2247 drm_rects[i].y1 = curr_rect.y;
2248 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2249 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2250
2251 VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
2252 drm_rects[i].x1, drm_rects[i].y1,
2253 drm_rects[i].x2, drm_rects[i].y2);
2254
2255 /*
2256 * Currently this check limits the topology to
2257 * mode_config->max (which is actually the maximum texture size
2258 * supported by the virtual device). This limit is here to address
2259 * window managers that create one big framebuffer for the whole
2260 * topology.
2261 */
2262 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2263 drm_rects[i].x2 > mode_config->max_width ||
2264 drm_rects[i].y2 > mode_config->max_height) {
2265 VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2266 drm_rects[i].x1, drm_rects[i].y1,
2267 drm_rects[i].x2, drm_rects[i].y2);
2268 ret = -EINVAL;
2269 goto out_free;
2270 }
2271 }
2272
2273 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2274
2275 if (ret == 0)
2276 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2277
2278 out_free:
2279 kfree(rects);
2280 return ret;
2281 }
2282
2283 /**
2284 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2285 * on a set of cliprects and a set of display units.
2286 *
2287 * @dev_priv: Pointer to a device private structure.
2288 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2289 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2290 * Cliprects are given in framebuffer coordinates.
2291 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2292 * be NULL. Cliprects are given in source coordinates.
2293 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2294 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2295 * @num_clips: Number of cliprects in the @clips or @vclips array.
2296 * @increment: Integer with which to increment the clip counter when looping.
2297 * Used to skip a predetermined number of clip rects.
2298 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2299 */
2300 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2301 struct vmw_framebuffer *framebuffer,
2302 const struct drm_clip_rect *clips,
2303 const struct drm_vmw_rect *vclips,
2304 s32 dest_x, s32 dest_y,
2305 int num_clips,
2306 int increment,
2307 struct vmw_kms_dirty *dirty)
2308 {
2309 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2310 struct drm_crtc *crtc;
2311 u32 num_units = 0;
2312 u32 i, k;
2313
2314 dirty->dev_priv = dev_priv;
2315
2316 /* If crtc is passed, no need to iterate over other display units */
2317 if (dirty->crtc) {
2318 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2319 } else {
2320 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2321 head) {
2322 struct drm_plane *plane = crtc->primary;
2323
2324 if (plane->state->fb == &framebuffer->base)
2325 units[num_units++] = vmw_crtc_to_du(crtc);
2326 }
2327 }
2328
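	/*
	 * For each affected display unit: reserve FIFO space if requested,
	 * translate each clip into that unit's crtc space, invoke the clip
	 * callback for clips intersecting the crtc, then commit.
	 */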
2329 for (k = 0; k < num_units; k++) {
2330 struct vmw_display_unit *unit = units[k];
2331 s32 crtc_x = unit->crtc.x;
2332 s32 crtc_y = unit->crtc.y;
2333 s32 crtc_width = unit->crtc.mode.hdisplay;
2334 s32 crtc_height = unit->crtc.mode.vdisplay;
2335 const struct drm_clip_rect *clips_ptr = clips;
2336 const struct drm_vmw_rect *vclips_ptr = vclips;
2337
2338 dirty->unit = unit;
2339 if (dirty->fifo_reserve_size > 0) {
2340 dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2341 dirty->fifo_reserve_size);
2342 if (!dirty->cmd)
2343 return -ENOMEM;
2344
2345 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2346 }
2347 dirty->num_hits = 0;
2348 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2349 vclips_ptr += increment) {
2350 s32 clip_left;
2351 s32 clip_top;
2352
2353 /*
2354 * Select clip array type. Note that integer type
2355 * in @clips is unsigned short, whereas in @vclips
2356 * it's 32-bit.
2357 */
2358 if (clips) {
2359 dirty->fb_x = (s32) clips_ptr->x1;
2360 dirty->fb_y = (s32) clips_ptr->y1;
2361 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2362 crtc_x;
2363 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2364 crtc_y;
2365 } else {
2366 dirty->fb_x = vclips_ptr->x;
2367 dirty->fb_y = vclips_ptr->y;
2368 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2369 dest_x - crtc_x;
2370 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2371 dest_y - crtc_y;
2372 }
2373
2374 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2375 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2376
2377 /* Skip this clip if it's outside the crtc region */
2378 if (dirty->unit_x1 >= crtc_width ||
2379 dirty->unit_y1 >= crtc_height ||
2380 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2381 continue;
2382
2383 /* Clip right and bottom to crtc limits */
2384 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2385 crtc_width);
2386 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2387 crtc_height);
2388
2389 /* Clip left and top to crtc limits */
2390 clip_left = min_t(s32, dirty->unit_x1, 0);
2391 clip_top = min_t(s32, dirty->unit_y1, 0);
2392 dirty->unit_x1 -= clip_left;
2393 dirty->unit_y1 -= clip_top;
2394 dirty->fb_x -= clip_left;
2395 dirty->fb_y -= clip_top;
2396
2397 dirty->clip(dirty);
2398 }
2399
2400 dirty->fifo_commit(dirty);
2401 }
2402
2403 return 0;
2404 }
2405
2406 /**
2407 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2408 * cleanup and fencing
2409 * @dev_priv: Pointer to the device-private struct
2410 * @file_priv: Pointer identifying the client when user-space fencing is used
2411 * @ctx: Pointer to the validation context
2412 * @out_fence: If non-NULL, returned refcounted fence-pointer
2413 * @user_fence_rep: If non-NULL, pointer to user-space address area
2414 * in which to copy user-space fence info
2415 */
2416 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2417 struct drm_file *file_priv,
2418 struct vmw_validation_context *ctx,
2419 struct vmw_fence_obj **out_fence,
2420 struct drm_vmw_fence_rep __user *
2421 user_fence_rep)
2422 {
2423 struct vmw_fence_obj *fence = NULL;
2424 uint32_t handle = 0;
2425 int ret = 0;
2426
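	/* Create a fence only if there is something to fence or someone to hand it to. */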
2427 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2428 out_fence)
2429 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2430 file_priv ? &handle : NULL);
2431 vmw_validation_done(ctx, fence);
2432 if (file_priv)
2433 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2434 ret, user_fence_rep, fence,
2435 handle, -1);
2436 if (out_fence)
2437 *out_fence = fence;
2438 else
2439 vmw_fence_obj_unreference(&fence);
2440 }
2441
2442 /**
2443 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2444 * property.
2445 *
2446 * @dev_priv: Pointer to a device private struct.
2447 *
2448 * Sets up the implicit placement property unless it's already set up.
2449 */
2450 void
2451 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2452 {
2453 if (dev_priv->implicit_placement_property)
2454 return;
2455
2456 dev_priv->implicit_placement_property =
2457 drm_property_create_range(&dev_priv->drm,
2458 DRM_MODE_PROP_IMMUTABLE,
2459 "implicit_placement", 0, 1);
2460 }
2461
2462 /**
2463 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2464 *
2465 * @dev: Pointer to the drm device
2466 * Return: 0 on success. Negative error code on failure.
2467 */
2468 int vmw_kms_suspend(struct drm_device *dev)
2469 {
2470 struct vmw_private *dev_priv = vmw_priv(dev);
2471
2472 dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2473 if (IS_ERR(dev_priv->suspend_state)) {
2474 int ret = PTR_ERR(dev_priv->suspend_state);
2475
2476 DRM_ERROR("Failed kms suspend: %d\n", ret);
2477 dev_priv->suspend_state = NULL;
2478
2479 return ret;
2480 }
2481
2482 return 0;
2483 }
2484
2485
2486 /**
2487 * vmw_kms_resume - Re-enable modesetting and restore state
2488 *
2489 * @dev: Pointer to the drm device
2490 * Return: 0 on success. Negative error code on failure.
2491 *
2492 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2493 * to call this function without a previous vmw_kms_suspend().
2494 */
2495 int vmw_kms_resume(struct drm_device *dev)
2496 {
2497 struct vmw_private *dev_priv = vmw_priv(dev);
2498 int ret;
2499
2500 if (WARN_ON(!dev_priv->suspend_state))
2501 return 0;
2502
2503 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2504 dev_priv->suspend_state = NULL;
2505
2506 return ret;
2507 }
2508
2509 /**
2510 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2511 *
2512 * @dev: Pointer to the drm device
2513 */
2514 void vmw_kms_lost_device(struct drm_device *dev)
2515 {
2516 drm_atomic_helper_shutdown(dev);
2517 }
2518
2519 /**
2520 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2521 * @update: The closure structure.
2522 *
2523 * Call this helper after setting callbacks in &vmw_du_update_plane to do a plane
2524 * update on a display unit.
2525 *
2526 * Return: 0 on success or a negative error code on failure.
2527 */
2528 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2529 {
2530 struct drm_plane_state *state = update->plane->state;
2531 struct drm_plane_state *old_state = update->old_state;
2532 struct drm_atomic_helper_damage_iter iter;
2533 struct drm_rect clip;
2534 struct drm_rect bb;
2535 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2536 uint32_t reserved_size = 0;
2537 uint32_t submit_size = 0;
2538 uint32_t curr_size = 0;
2539 uint32_t num_hits = 0;
2540 void *cmd_start;
2541 char *cmd_next;
2542 int ret;
2543
2544 /*
2545 * Iterate in advance to check whether a plane update is really needed and
2546 * to find the number of clips that actually lie in the plane src, for fifo allocation.
2547 */
2548 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2549 drm_atomic_for_each_plane_damage(&iter, &clip)
2550 num_hits++;
2551
2552 if (num_hits == 0)
2553 return 0;
2554
2555 if (update->vfb->bo) {
2556 struct vmw_framebuffer_bo *vfbbo =
2557 container_of(update->vfb, typeof(*vfbbo), base);
2558
2559 /*
2560 * For screen targets we want a mappable bo; for everything else we want an
2561 * accelerated, i.e. host-backed (vram or gmr), bo. If the display unit
2562 * is not a screen target then MOBs shouldn't be available.
2563 */
2564 if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2565 vmw_bo_placement_set(vfbbo->buffer,
2566 VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2567 VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2568 } else {
2569 WARN_ON(update->dev_priv->has_mob);
2570 vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2571 }
2572 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2573 } else {
2574 struct vmw_framebuffer_surface *vfbs =
2575 container_of(update->vfb, typeof(*vfbs), base);
2576 struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
2577
2578 ret = vmw_validation_add_resource(&val_ctx, &surf->res,
2579 0, VMW_RES_DIRTY_NONE, NULL,
2580 NULL);
2581 }
2582
2583 if (ret)
2584 return ret;
2585
2586 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2587 if (ret)
2588 goto out_unref;
2589
2590 reserved_size = update->calc_fifo_size(update, num_hits);
2591 cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2592 if (!cmd_start) {
2593 ret = -ENOMEM;
2594 goto out_revert;
2595 }
2596
2597 cmd_next = cmd_start;
2598
2599 if (update->post_prepare) {
2600 curr_size = update->post_prepare(update, cmd_next);
2601 cmd_next += curr_size;
2602 submit_size += curr_size;
2603 }
2604
2605 if (update->pre_clip) {
2606 curr_size = update->pre_clip(update, cmd_next, num_hits);
2607 cmd_next += curr_size;
2608 submit_size += curr_size;
2609 }
2610
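	/* Track the bounding box of all clips in crtc space; it is handed to post_clip below. */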
2611 bb.x1 = INT_MAX;
2612 bb.y1 = INT_MAX;
2613 bb.x2 = INT_MIN;
2614 bb.y2 = INT_MIN;
2615
2616 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2617 drm_atomic_for_each_plane_damage(&iter, &clip) {
2618 uint32_t fb_x = clip.x1;
2619 uint32_t fb_y = clip.y1;
2620
2621 vmw_du_translate_to_crtc(state, &clip);
2622 if (update->clip) {
2623 curr_size = update->clip(update, cmd_next, &clip, fb_x,
2624 fb_y);
2625 cmd_next += curr_size;
2626 submit_size += curr_size;
2627 }
2628 bb.x1 = min_t(int, bb.x1, clip.x1);
2629 bb.y1 = min_t(int, bb.y1, clip.y1);
2630 bb.x2 = max_t(int, bb.x2, clip.x2);
2631 bb.y2 = max_t(int, bb.y2, clip.y2);
2632 }
2633
2634 curr_size = update->post_clip(update, cmd_next, &bb);
2635 submit_size += curr_size;
2636
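	/* Defensive: if the callbacks produced more data than was reserved, commit nothing. */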
2637 if (reserved_size < submit_size)
2638 submit_size = 0;
2639
2640 vmw_cmd_commit(update->dev_priv, submit_size);
2641
2642 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2643 update->out_fence, NULL);
2644 return ret;
2645
2646 out_revert:
2647 vmw_validation_revert(&val_ctx);
2648
2649 out_unref:
2650 vmw_validation_unref_lists(&val_ctx);
2651 return ret;
2652 }
2653
2654 /**
2655 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2656 *
2657 * @connector: the drm connector, part of a DU container
2658 * @mode: drm mode to check
2659 *
2660 * Returns MODE_OK on success, or a drm_mode_status error code.
2661 */
2662 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2663 struct drm_display_mode *mode)
2664 {
2665 enum drm_mode_status ret;
2666 struct drm_device *dev = connector->dev;
2667 struct vmw_private *dev_priv = vmw_priv(dev);
2668 u32 assumed_cpp = 4;
2669
2670 if (dev_priv->assume_16bpp)
2671 assumed_cpp = 2;
2672
2673 ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
2674 dev_priv->texture_max_height);
2675 if (ret != MODE_OK)
2676 return ret;
2677
2678 if (!vmw_kms_validate_mode_vram(dev_priv,
2679 mode->hdisplay * assumed_cpp,
2680 mode->vdisplay))
2681 return MODE_MEM;
2682
2683 return MODE_OK;
2684 }
2685
2686 /**
2687 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2688 *
2689 * @connector: the drm connector, part of a DU container
2690 *
2691 * Returns the number of added modes.
2692 */
2693 int vmw_connector_get_modes(struct drm_connector *connector)
2694 {
2695 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2696 struct drm_device *dev = connector->dev;
2697 struct vmw_private *dev_priv = vmw_priv(dev);
2698 struct drm_display_mode *mode = NULL;
2699 struct drm_display_mode prefmode = { DRM_MODE("preferred",
2700 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2701 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2702 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2703 };
2704 u32 max_width;
2705 u32 max_height;
2706 u32 num_modes;
2707
2708 /* Add preferred mode */
2709 mode = drm_mode_duplicate(dev, &prefmode);
2710 if (!mode)
2711 return 0;
2712
2713 mode->hdisplay = du->pref_width;
2714 mode->vdisplay = du->pref_height;
2715 vmw_guess_mode_timing(mode);
2716 drm_mode_set_name(mode);
2717
2718 drm_mode_probed_add(connector, mode);
2719 drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2720
2721 /* Probe connector for all modes not exceeding our geom limits */
2722 max_width = dev_priv->texture_max_width;
2723 max_height = dev_priv->texture_max_height;
2724
2725 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2726 max_width = min(dev_priv->stdu_max_width, max_width);
2727 max_height = min(dev_priv->stdu_max_height, max_height);
2728 }
2729
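	/* One preferred mode plus the standard modes added by drm_add_modes_noedid(). */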
2730 num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2731
2732 return num_modes;
2733 }
2734
2735 struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
2736 {
2737 if (uo->buffer)
2738 vmw_user_bo_ref(uo->buffer);
2739 else if (uo->surface)
2740 vmw_surface_reference(uo->surface);
2741 return uo;
2742 }
2743
2744 void vmw_user_object_unref(struct vmw_user_object *uo)
2745 {
2746 if (uo->buffer)
2747 vmw_user_bo_unref(&uo->buffer);
2748 else if (uo->surface)
2749 vmw_surface_unreference(&uo->surface);
2750 }
2751
2752 struct vmw_bo *
2753 vmw_user_object_buffer(struct vmw_user_object *uo)
2754 {
2755 if (uo->buffer)
2756 return uo->buffer;
2757 else if (uo->surface)
2758 return uo->surface->res.guest_memory_bo;
2759 return NULL;
2760 }
2761
2762 struct vmw_surface *
2763 vmw_user_object_surface(struct vmw_user_object *uo)
2764 {
2765 if (uo->buffer)
2766 return uo->buffer->dumb_surface;
2767 return uo->surface;
2768 }
2769
2770 void *vmw_user_object_map(struct vmw_user_object *uo)
2771 {
2772 struct vmw_bo *bo = vmw_user_object_buffer(uo);
2773
2774 WARN_ON(!bo);
2775 return vmw_bo_map_and_cache(bo);
2776 }
2777
2778 void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
2779 {
2780 struct vmw_bo *bo = vmw_user_object_buffer(uo);
2781
2782 WARN_ON(!bo);
2783 return vmw_bo_map_and_cache_size(bo, size);
2784 }
2785
2786 void vmw_user_object_unmap(struct vmw_user_object *uo)
2787 {
2788 struct vmw_bo *bo = vmw_user_object_buffer(uo);
2789 int ret;
2790
2791 WARN_ON(!bo);
2792
2793 /* Fence the mob creation so we are guaranteed to have the mob */
2794 ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
2795 if (ret != 0)
2796 return;
2797
2798 vmw_bo_unmap(bo);
2799 vmw_bo_pin_reserved(bo, false);
2800
2801 ttm_bo_unreserve(&bo->tbo);
2802 }
2803
2804 bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
2805 {
2806 struct vmw_bo *bo;
2807
2808 if (!uo || vmw_user_object_is_null(uo))
2809 return false;
2810
2811 bo = vmw_user_object_buffer(uo);
2812
2813 if (WARN_ON(!bo))
2814 return false;
2815
2816 WARN_ON(bo->map.bo && !bo->map.virtual);
2817 return bo->map.virtual;
2818 }
2819
2820 bool vmw_user_object_is_null(struct vmw_user_object *uo)
2821 {
2822 return !uo->buffer && !uo->surface;
2823 }
2824