// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
#include "vmwgfx_cursor_plane.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"
#include "vmwgfx_resource_priv.h"
#include "vmw_surface_cache.h"

#include "drm/drm_atomic.h"
#include "drm/drm_atomic_helper.h"
#include "drm/drm_plane.h"
#include <asm/page.h>

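/*
 * The legacy (snooped) cursor path only handles a fixed 64x64
 * A8R8G8B8 image; vmw_cursor_plane_atomic_check() rejects other
 * sizes for it, while the MOB path supports cursors up to the
 * device's SVGA_REG_CURSOR_MAX_DIMENSION.
 */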
#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
#define VMW_CURSOR_SNOOP_WIDTH 64
#define VMW_CURSOR_SNOOP_HEIGHT 64

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * at the risk of clogging other fifocmd users, so
	 * we treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

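	/* The image payload starts right after the fixed-size command, at &cmd[1]. */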
	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

static void
vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
			       struct vmw_plane_state *vps)
{
	struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
	s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
	s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;

	if (WARN_ON(!surface || !surface->snooper.image))
		return;

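	/* Only re-define the cursor when the snooped image generation changed. */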
	if (vps->cursor.legacy.id != surface->snooper.id) {
		vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
					   vps->base.crtc_w, vps->base.crtc_h,
					   hotspot_x, hotspot_y);
		vps->cursor.legacy.id = surface->snooper.id;
	}
}

static enum vmw_cursor_update_type
vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
{
	struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);

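	/*
	 * A snooped surface image forces the legacy path. Otherwise use
	 * a cursor MOB when the device advertises it, or fall back to
	 * plain define-cursor commands on guest-backed devices.
	 */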
	if (surface && surface->snooper.image)
		return VMW_CURSOR_UPDATE_LEGACY;

	if (vmw->has_mob) {
		if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
			return VMW_CURSOR_UPDATE_MOB;
		else
			return VMW_CURSOR_UPDATE_GB_ONLY;
	}
	drm_warn_once(&vmw->drm, "Unknown Cursor Type!\n");
	return VMW_CURSOR_UPDATE_NONE;
}

static void vmw_cursor_update_mob(struct vmw_private *vmw,
				  struct vmw_plane_state *vps)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
	u32 *image = vmw_bo_map_and_cache(bo);
	const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.mob);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
	alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
	alpha_header->width = vps->base.crtc_w;
	alpha_header->height = vps->base.crtc_h;

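	/*
	 * The pixel data lives right after the cursor header in the MOB;
	 * writing SVGA_REG_CURSOR_MOBID below points the device at the
	 * freshly filled buffer.
	 */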
	memcpy(header + 1, image, image_size);
	vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));

	vmw_bo_unmap(bo);
	vmw_bo_unmap(vps->cursor.mob);
}

static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
			       u32 w, u32 h)
{
	switch (update_type) {
	case VMW_CURSOR_UPDATE_LEGACY:
	case VMW_CURSOR_UPDATE_GB_ONLY:
	case VMW_CURSOR_UPDATE_NONE:
		return 0;
	case VMW_CURSOR_UPDATE_MOB:
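		/*
		 * One u32 ARGB texel per pixel plus the cursor header,
		 * e.g. a 64x64 cursor needs 64 * 64 * 4 = 16384 bytes of
		 * image data in addition to sizeof(SVGAGBCursorHeader).
		 */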
		return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
	}
	return 0;
}

static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

/**
 * vmw_cursor_mob_unmap - Unmaps the cursor mobs.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */

static int
vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.mob;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}

static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
			       struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.mob)
		return;

	vmw_cursor_mob_unmap(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.mob;
			vps->cursor.mob = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.mob->tbo.base.size) {
			vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.mob;
			vps->cursor.mob = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_cursor_mob_destroy(&vps->cursor.mob);
}

static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
			      struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
	u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
				       vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.mob) {
		if (vps->cursor.mob->tbo.base.size >= size)
			return 0;
		vmw_cursor_mob_put(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.mob = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
					 &vps->cursor.mob);

	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (ret != 0) {
		ttm_bo_unreserve(&vps->cursor.mob->tbo);
		goto teardown;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&vps->cursor.mob->tbo);

	return 0;

teardown:
	vmw_cursor_mob_destroy(&vps->cursor.mob);
	return ret;
}

static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
				   : SVGA_CURSOR_ON_HIDE;
	u32 count;

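	/*
	 * Use the best interface the device offers: the CURSOR4
	 * registers (SVGA_CAP2_EXTRA_REGS), then FIFO cursor bypass 3,
	 * then the legacy cursor registers.
	 */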
	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	u32 box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

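	/* The copy boxes follow the fixed SurfaceDMA command body. */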
	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}
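	/* Bump the generation so the legacy path re-defines the cursor. */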
	srf->snooper.id++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

void vmw_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}

/**
 * vmw_cursor_mob_map - Maps the cursor mobs.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */

static int
vmw_cursor_mob_map(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
				       vps->base.crtc_w, vps->base.crtc_h);
	struct vmw_bo *vbo = vps->cursor.mob;

	if (!vbo)
		return -EINVAL;

	if (vbo->tbo.base.size < size)
		return -EINVAL;

	if (vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vbo);

	ttm_bo_unreserve(&vbo->tbo);

	return 0;
}

/**
 * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface
 */
void
vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	if (!vmw_user_object_is_null(&vps->uo))
		vmw_user_object_unmap(&vps->uo);

	vmw_cursor_mob_unmap(vps);
	vmw_cursor_mob_put(vcp, vps);

	vmw_du_plane_unpin_surf(vps);
	vmw_user_object_unref(&vps->uo);
}

static bool
vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
			  struct vmw_plane_state *old_vps)
{
	struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
	struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
	struct vmw_surface *surf;
	bool dirty = false;
	int ret;

	if (new_bo != old_bo)
		return true;

	if (new_bo) {
		if (!old_bo) {
			return true;
		} else if (new_bo->dirty) {
			vmw_bo_dirty_scan(new_bo);
			dirty = vmw_bo_is_dirty(new_bo);
			if (dirty) {
				surf = vmw_user_object_surface(&new_vps->uo);
				if (surf)
					vmw_bo_dirty_transfer_to_res(&surf->res);
				else
					vmw_bo_dirty_clear(new_bo);
			}
			return dirty;
		} else if (new_bo != old_bo) {
			/*
			 * Currently unused because the check at the top
			 * exits right away. In most cases a different
			 * buffer means different contents. For the few
			 * percent of cases where that's not true, the cost
			 * of doing the memcmp in all other cases seems to
			 * outweigh the benefits. Leave the conditional in
			 * place to be able to trivially validate it by
			 * removing the initial if (new_bo != old_bo) at
			 * the start of the function.
			 */
			void *old_image;
			void *new_image;
			bool changed = false;
			struct ww_acquire_ctx ctx;
			const u32 size = new_vps->base.crtc_w *
					 new_vps->base.crtc_h * sizeof(u32);

			ww_acquire_init(&ctx, &reservation_ww_class);

			ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
			if (ret != 0) {
				ww_acquire_fini(&ctx);
				return true;
			}

			ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
			if (ret != 0) {
				ttm_bo_unreserve(&old_bo->tbo);
				ww_acquire_fini(&ctx);
				return true;
			}

			old_image = vmw_bo_map_and_cache(old_bo);
			new_image = vmw_bo_map_and_cache(new_bo);

			if (old_image && new_image && old_image != new_image)
				changed = memcmp(old_image, new_image, size) !=
					  0;

			ttm_bo_unreserve(&new_bo->tbo);
			ttm_bo_unreserve(&old_bo->tbo);

			ww_acquire_fini(&ctx);

			return changed;
		}
		return false;
	}

	return false;
}

static bool
vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
			 struct vmw_plane_state *old_vps)
{
	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
	    old_vps->base.hotspot_y != new_vps->base.hotspot_y)
		return true;

	if (old_vps->cursor.legacy.hotspot_x !=
		    new_vps->cursor.legacy.hotspot_x ||
	    old_vps->cursor.legacy.hotspot_y !=
		    new_vps->cursor.legacy.hotspot_y)
		return true;

	if (old_vps->base.fb != new_vps->base.fb)
		return true;

	return false;
}

/**
 * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
	struct vmw_private *vmw = vmw_priv(plane->dev);
	struct vmw_bo *bo = NULL;
	struct vmw_surface *surface;
	int ret = 0;

	if (!vmw_user_object_is_null(&vps->uo)) {
		vmw_user_object_unmap(&vps->uo);
		vmw_user_object_unref(&vps->uo);
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
			vps->uo.surface = NULL;
		} else {
			memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
		}
		vmw_user_object_ref(&vps->uo);
	}

	vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
	switch (vps->cursor.update_type) {
	case VMW_CURSOR_UPDATE_LEGACY:
		surface = vmw_user_object_surface(&vps->uo);
		if (!surface || vps->cursor.legacy.id == surface->snooper.id)
			vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
		break;
	case VMW_CURSOR_UPDATE_GB_ONLY:
	case VMW_CURSOR_UPDATE_MOB: {
		bo = vmw_user_object_buffer(&vps->uo);
		if (bo) {
			struct ttm_operation_ctx ctx = { false, false };

			ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
			if (ret != 0)
				return -ENOMEM;

			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret != 0)
				return -ENOMEM;

			/*
			 * vmw_bo_pin_reserved also validates, so to skip
			 * the extra validation use ttm_bo_pin directly
			 */
			if (!bo->tbo.pin_count)
				ttm_bo_pin(&bo->tbo);

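			/*
			 * bo-backed framebuffers are mapped just large
			 * enough for the visible cursor, while
			 * surface-backed ones map the whole backing bo.
			 */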
			if (vmw_framebuffer_to_vfb(fb)->bo) {
				const u32 size = new_state->crtc_w *
						 new_state->crtc_h *
						 sizeof(u32);

				(void)vmw_bo_map_and_cache_size(bo, size);
			} else {
				vmw_bo_map_and_cache(bo);
			}
			ttm_bo_unreserve(&bo->tbo);
		}
		if (!vmw_user_object_is_null(&vps->uo)) {
			if (!vmw_cursor_plane_changed(vps, old_vps) &&
			    !vmw_cursor_buffer_changed(vps, old_vps)) {
				vps->cursor.update_type =
					VMW_CURSOR_UPDATE_NONE;
			} else {
				vmw_cursor_mob_get(vcp, vps);
				vmw_cursor_mob_map(vps);
			}
		}
	}
		break;
	case VMW_CURSOR_UPDATE_NONE:
		/* do nothing */
		break;
	}

	return 0;
}

/**
 * vmw_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct vmw_private *vmw = vmw_priv(plane->dev);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	enum vmw_cursor_update_type update_type;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING, true,
						  true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	update_type = vmw_cursor_update_type(vmw, vps);
	if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
		if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
		    new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
			drm_warn(&vmw->drm,
				 "Invalid cursor dimensions (%d, %d)\n",
				 new_state->crtc_w, new_state->crtc_h);
			return -EINVAL;
		}
		surface = vmw_user_object_surface(&vps->uo);
		if (!surface || !surface->snooper.image) {
			drm_warn(&vmw->drm,
				 "surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}

void
vmw_cursor_plane_atomic_update(struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct vmw_bo *bo;
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(plane->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	s32 hotspot_x, hotspot_y, cursor_x, cursor_y;

	/*
	 * Hide the cursor if the new bo is null
	 */
	if (vmw_user_object_is_null(&vps->uo)) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	switch (vps->cursor.update_type) {
	case VMW_CURSOR_UPDATE_LEGACY:
		vmw_cursor_plane_update_legacy(dev_priv, vps);
		break;
	case VMW_CURSOR_UPDATE_MOB:
		vmw_cursor_update_mob(dev_priv, vps);
		break;
	case VMW_CURSOR_UPDATE_GB_ONLY:
		bo = vmw_user_object_buffer(&vps->uo);
		if (bo)
			vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual,
						   vps->base.crtc_w,
						   vps->base.crtc_h,
						   vps->base.hotspot_x,
						   vps->base.hotspot_y);
		break;
	case VMW_CURSOR_UPDATE_NONE:
		/* do nothing */
		break;
	}

	/*
	 * For all update types update the cursor position
	 */
	cursor_x = new_state->crtc_x + du->set_gui_x;
	cursor_y = new_state->crtc_y + du->set_gui_y;

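	/* The legacy cursor-bypass hotspot stacks with the atomic one. */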
	hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
	hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;

	vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
				   cursor_y + hotspot_y);
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct vmw_plane_state *vps;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			vps = vmw_plane_state_to_vps(du->cursor.base.state);
			vps->cursor.legacy.hotspot_x = arg->xhot;
			vps->cursor.legacy.hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);
	vps = vmw_plane_state_to_vps(du->cursor.base.state);
	vps->cursor.legacy.hotspot_x = arg->xhot;
	vps->cursor.legacy.hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

void *vmw_cursor_snooper_create(struct drm_file *file_priv,
				struct vmw_surface_metadata *metadata)
{
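	/*
	 * Snooping only applies to legacy (non-atomic) clients whose
	 * cursor surface matches the fixed 64x64 A8R8G8B8 snoop format;
	 * any other surface gets no snooper image.
	 */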
	if (!file_priv->atomic && metadata->scanout &&
	    metadata->num_sizes == 1 &&
	    metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
	    metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
	    metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
		const struct SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
		const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
					      VMW_CURSOR_SNOOP_HEIGHT *
					      desc->pitchBytesPerBlock;
		void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);

		if (!image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			return ERR_PTR(-ENOMEM);
		}
		return image;
	}
	return NULL;
}
859