xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c (revision 9bffa1ad25b8b3b95d8f463e5c24dabe3c87d54d)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 #include "vmwgfx_kms.h"
29 
30 #include "vmwgfx_bo.h"
31 #include "vmwgfx_vkms.h"
32 #include "vmw_surface_cache.h"
33 
34 #include <drm/drm_atomic.h>
35 #include <drm/drm_atomic_helper.h>
36 #include <drm/drm_damage_helper.h>
37 #include <drm/drm_fourcc.h>
38 #include <drm/drm_rect.h>
39 #include <drm/drm_sysfs.h>
40 #include <drm/drm_edid.h>
41 
42 void vmw_du_init(struct vmw_display_unit *du)
43 {
44 	vmw_vkms_crtc_init(&du->crtc);
45 }
46 
47 void vmw_du_cleanup(struct vmw_display_unit *du)
48 {
49 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
50 
51 	vmw_vkms_crtc_cleanup(&du->crtc);
52 	drm_plane_cleanup(&du->primary);
53 	if (vmw_cmd_supported(dev_priv))
54 		drm_plane_cleanup(&du->cursor.base);
55 
56 	drm_connector_unregister(&du->connector);
57 	drm_crtc_cleanup(&du->crtc);
58 	drm_encoder_cleanup(&du->encoder);
59 	drm_connector_cleanup(&du->connector);
60 }
61 
62 /*
63  * Display Unit Cursor functions
64  */
65 
66 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
67 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
68 				  struct vmw_plane_state *vps,
69 				  u32 *image, u32 width, u32 height,
70 				  u32 hotspotX, u32 hotspotY);
71 
72 struct vmw_svga_fifo_cmd_define_cursor {
73 	u32 cmd;
74 	SVGAFifoCmdDefineAlphaCursor cursor;
75 };
76 
77 /**
78  * vmw_send_define_cursor_cmd - queue a define cursor command
79  * @dev_priv: the private driver struct
80  * @image: buffer which holds the cursor image
81  * @width: width of the mouse cursor image
82  * @height: height of the mouse cursor image
83  * @hotspotX: the horizontal position of mouse hotspot
84  * @hotspotY: the vertical position of mouse hotspot
85  */
86 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
87 				       u32 *image, u32 width, u32 height,
88 				       u32 hotspotX, u32 hotspotY)
89 {
90 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
91 	const u32 image_size = width * height * sizeof(*image);
92 	const u32 cmd_size = sizeof(*cmd) + image_size;
93 
94 	/* Try to reserve fifocmd space and swallow any failures;
95 	   such reservations cannot be left unconsumed for long
96 	   without risking clogging other fifocmd users, so
97 	   we treat reservations separately from the way we treat
98 	   other fallible KMS-atomic resources at prepare_fb */
99 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
100 
101 	if (unlikely(!cmd))
102 		return;
103 
104 	memset(cmd, 0, sizeof(*cmd));
105 
106 	memcpy(&cmd[1], image, image_size);
107 
108 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
109 	cmd->cursor.id = 0;
110 	cmd->cursor.width = width;
111 	cmd->cursor.height = height;
112 	cmd->cursor.hotspotX = hotspotX;
113 	cmd->cursor.hotspotY = hotspotY;
114 
115 	vmw_cmd_commit_flush(dev_priv, cmd_size);
116 }
117 
118 /**
119  * vmw_cursor_update_image - update the cursor image on the provided plane
120  * @dev_priv: the private driver struct
121  * @vps: the plane state of the cursor plane
122  * @image: buffer which holds the cursor image
123  * @width: width of the mouse cursor image
124  * @height: height of the mouse cursor image
125  * @hotspotX: the horizontal position of mouse hotspot
126  * @hotspotY: the vertical position of mouse hotspot
127  */
128 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
129 				    struct vmw_plane_state *vps,
130 				    u32 *image, u32 width, u32 height,
131 				    u32 hotspotX, u32 hotspotY)
132 {
133 	if (vps->cursor.bo)
134 		vmw_cursor_update_mob(dev_priv, vps, image,
135 				      vps->base.crtc_w, vps->base.crtc_h,
136 				      hotspotX, hotspotY);
137 
138 	else
139 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
140 					   hotspotX, hotspotY);
141 }
142 
143 
144 /**
145  * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
146  *
147  * Called from inside vmw_du_cursor_plane_atomic_update to actually
148  * make the cursor-image live.
149  *
150  * @dev_priv: device to work with
151  * @vps: the plane state of the cursor plane
152  * @image: cursor source data to fill the MOB with
153  * @width: source data width
154  * @height: source data height
155  * @hotspotX: cursor hotspot x
156  * @hotspotY: cursor hotspot Y
157  */
158 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
159 				  struct vmw_plane_state *vps,
160 				  u32 *image, u32 width, u32 height,
161 				  u32 hotspotX, u32 hotspotY)
162 {
163 	SVGAGBCursorHeader *header;
164 	SVGAGBAlphaCursorHeader *alpha_header;
165 	const u32 image_size = width * height * sizeof(*image);
166 
167 	header = vmw_bo_map_and_cache(vps->cursor.bo);
168 	alpha_header = &header->header.alphaHeader;
169 
170 	memset(header, 0, sizeof(*header));
171 
172 	header->type = SVGA_ALPHA_CURSOR;
173 	header->sizeInBytes = image_size;
174 
175 	alpha_header->hotspotX = hotspotX;
176 	alpha_header->hotspotY = hotspotY;
177 	alpha_header->width = width;
178 	alpha_header->height = height;
179 
180 	memcpy(header + 1, image, image_size);
181 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
182 		  vps->cursor.bo->tbo.resource->start);
183 }
184 
185 
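/*
 * A cursor MOB holds an SVGAGBCursorHeader followed by a 32bpp image,
 * hence width * height * sizeof(u32) plus the header size.
 */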
186 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
187 {
188 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
189 }
190 
191 /**
192  * vmw_du_cursor_plane_acquire_image -- Acquire the image data
193  * @vps: cursor plane state
194  */
195 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
196 {
197 	struct vmw_surface *surf;
198 
199 	if (vmw_user_object_is_null(&vps->uo))
200 		return NULL;
201 
202 	surf = vmw_user_object_surface(&vps->uo);
203 	if (surf && !vmw_user_object_is_mapped(&vps->uo))
204 		return surf->snooper.image;
205 
206 	return vmw_user_object_map(&vps->uo);
207 }
208 
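/*
 * vmw_du_cursor_plane_has_changed - report whether the cursor needs to be
 * redefined: true if the dimensions, the hotspot or the image contents
 * differ between the old and the new plane state.
 */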
209 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
210 					    struct vmw_plane_state *new_vps)
211 {
212 	void *old_image;
213 	void *new_image;
214 	u32 size;
215 	bool changed;
216 
217 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
218 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
219 		return true;
220 
221 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
222 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
223 		return true;
224 
225 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
226 
227 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
228 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
229 
230 	changed = false;
231 	if (old_image && new_image && old_image != new_image)
232 		changed = memcmp(old_image, new_image, size) != 0;
233 
234 	return changed;
235 }
236 
237 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
238 {
239 	if (!(*vbo))
240 		return;
241 
242 	ttm_bo_unpin(&(*vbo)->tbo);
243 	vmw_bo_unreference(vbo);
244 }
245 
246 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
247 				  struct vmw_plane_state *vps)
248 {
249 	u32 i;
250 
251 	if (!vps->cursor.bo)
252 		return;
253 
254 	vmw_du_cursor_plane_unmap_cm(vps);
255 
256 	/* Look for a free slot to return this mob to the cache. */
257 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
258 		if (!vcp->cursor_mobs[i]) {
259 			vcp->cursor_mobs[i] = vps->cursor.bo;
260 			vps->cursor.bo = NULL;
261 			return;
262 		}
263 	}
264 
265 	/* Cache is full: See if this mob is bigger than an existing mob. */
266 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
267 		if (vcp->cursor_mobs[i]->tbo.base.size <
268 		    vps->cursor.bo->tbo.base.size) {
269 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
270 			vcp->cursor_mobs[i] = vps->cursor.bo;
271 			vps->cursor.bo = NULL;
272 			return;
273 		}
274 	}
275 
276 	/* Destroy it if it's not worth caching. */
277 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
278 }
279 
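/*
 * vmw_du_get_cursor_mob - pick up a cursor MOB for the plane state, either
 * from the per-plane cache or by creating a new one.  Returns -EINVAL when
 * cursor MOBs are unsupported or the cursor is too large; in that case the
 * caller falls back to the legacy SVGA_CMD_DEFINE_ALPHA_CURSOR path.
 */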
280 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
281 				 struct vmw_plane_state *vps)
282 {
283 	struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
284 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
285 	u32 i;
286 	u32 cursor_max_dim, mob_max_size;
287 	struct vmw_fence_obj *fence = NULL;
288 	int ret;
289 
290 	if (!dev_priv->has_mob ||
291 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
292 		return -EINVAL;
293 
294 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
295 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
296 
297 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
298 	    vps->base.crtc_h > cursor_max_dim)
299 		return -EINVAL;
300 
301 	if (vps->cursor.bo) {
302 		if (vps->cursor.bo->tbo.base.size >= size)
303 			return 0;
304 		vmw_du_put_cursor_mob(vcp, vps);
305 	}
306 
307 	/* Look for an unused mob in the cache. */
308 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
309 		if (vcp->cursor_mobs[i] &&
310 		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
311 			vps->cursor.bo = vcp->cursor_mobs[i];
312 			vcp->cursor_mobs[i] = NULL;
313 			return 0;
314 		}
315 	}
316 	/* Create a new mob if we can't find an existing one. */
317 	ret = vmw_bo_create_and_populate(dev_priv, size,
318 					 VMW_BO_DOMAIN_MOB,
319 					 &vps->cursor.bo);
320 
321 	if (ret != 0)
322 		return ret;
323 
324 	/* Fence the mob creation so we are guaranteed to have the mob */
325 	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
326 	if (ret != 0)
327 		goto teardown;
328 
329 	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
330 	if (ret != 0) {
331 		ttm_bo_unreserve(&vps->cursor.bo->tbo);
332 		goto teardown;
333 	}
334 
335 	dma_fence_wait(&fence->base, false);
336 	dma_fence_put(&fence->base);
337 
338 	ttm_bo_unreserve(&vps->cursor.bo->tbo);
339 	return 0;
340 
341 teardown:
342 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
343 	return ret;
344 }
345 
346 
347 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
348 				       bool show, int x, int y)
349 {
350 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
351 					     : SVGA_CURSOR_ON_HIDE;
352 	uint32_t count;
353 
354 	spin_lock(&dev_priv->cursor_lock);
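	/*
	 * Three mechanisms, newest first: the CURSOR4 register set with an
	 * explicit submit, cursor bypass 3 through FIFO registers, and the
	 * legacy SVGA cursor registers.
	 */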
355 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
356 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
357 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
358 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
359 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
360 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
361 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
362 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
363 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
364 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
365 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
366 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
367 	} else {
368 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
369 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
370 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
371 	}
372 	spin_unlock(&dev_priv->cursor_lock);
373 }
374 
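/*
 * vmw_kms_cursor_snoop - Intercept a surface DMA targeting a cursor surface
 * and copy the transferred image into the surface's snooper buffer, so the
 * device cursor can later be redefined from it (see
 * vmw_kms_cursor_post_execbuf()).
 */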
375 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
376 			  struct ttm_object_file *tfile,
377 			  struct ttm_buffer_object *bo,
378 			  SVGA3dCmdHeader *header)
379 {
380 	struct ttm_bo_kmap_obj map;
381 	unsigned long kmap_offset;
382 	unsigned long kmap_num;
383 	SVGA3dCopyBox *box;
384 	unsigned box_count;
385 	void *virtual;
386 	bool is_iomem;
387 	struct vmw_dma_cmd {
388 		SVGA3dCmdHeader header;
389 		SVGA3dCmdSurfaceDMA dma;
390 	} *cmd;
391 	int i, ret;
392 	const struct SVGA3dSurfaceDesc *desc =
393 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
394 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
395 
396 	cmd = container_of(header, struct vmw_dma_cmd, header);
397 
398 	/* No snooper installed, nothing to copy */
399 	if (!srf->snooper.image)
400 		return;
401 
402 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
403 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
404 		return;
405 	}
406 
407 	if (cmd->header.size < 64) {
408 		DRM_ERROR("at least one full copy box must be given\n");
409 		return;
410 	}
411 
412 	box = (SVGA3dCopyBox *)&cmd[1];
413 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
414 			sizeof(SVGA3dCopyBox);
415 
416 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
417 	    box->x != 0    || box->y != 0    || box->z != 0    ||
418 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
419 	    box->d != 1    || box_count != 1 ||
420 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
421 		/* TODO handle non-page-aligned offsets */
422 		/* TODO handle more dst & src != 0 */
423 		/* TODO handle more than one copy */
424 		DRM_ERROR("Can't snoop dma request for cursor!\n");
425 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
426 			  box->srcx, box->srcy, box->srcz,
427 			  box->x, box->y, box->z,
428 			  box->w, box->h, box->d, box_count,
429 			  cmd->dma.guest.ptr.offset);
430 		return;
431 	}
432 
433 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
434 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
435 
436 	ret = ttm_bo_reserve(bo, true, false, NULL);
437 	if (unlikely(ret != 0)) {
438 		DRM_ERROR("reserve failed\n");
439 		return;
440 	}
441 
442 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
443 	if (unlikely(ret != 0))
444 		goto err_unreserve;
445 
446 	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
447 
448 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
449 		memcpy(srf->snooper.image, virtual,
450 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
451 	} else {
452 		/* Source width or pitch differs from the snooper image; copy row by row. */
453 		for (i = 0; i < box->h; i++)
454 			memcpy(srf->snooper.image + i * image_pitch,
455 			       virtual + i * cmd->dma.guest.pitch,
456 			       box->w * desc->pitchBytesPerBlock);
457 	}
458 
459 	srf->snooper.age++;
460 
461 	ttm_bo_kunmap(&map);
462 err_unreserve:
463 	ttm_bo_unreserve(bo);
464 }
465 
466 /**
467  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
468  *
469  * @dev_priv: Pointer to the device private struct.
470  *
471  * Clears all legacy hotspots.
472  */
473 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
474 {
475 	struct drm_device *dev = &dev_priv->drm;
476 	struct vmw_display_unit *du;
477 	struct drm_crtc *crtc;
478 
479 	drm_modeset_lock_all(dev);
480 	drm_for_each_crtc(crtc, dev) {
481 		du = vmw_crtc_to_du(crtc);
482 
483 		du->hotspot_x = 0;
484 		du->hotspot_y = 0;
485 	}
486 	drm_modeset_unlock_all(dev);
487 }
488 
489 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
490 {
491 	struct drm_device *dev = &dev_priv->drm;
492 	struct vmw_display_unit *du;
493 	struct drm_crtc *crtc;
494 
495 	mutex_lock(&dev->mode_config.mutex);
496 
497 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
498 		du = vmw_crtc_to_du(crtc);
499 		if (!du->cursor_surface ||
500 		    du->cursor_age == du->cursor_surface->snooper.age ||
501 		    !du->cursor_surface->snooper.image)
502 			continue;
503 
504 		du->cursor_age = du->cursor_surface->snooper.age;
505 		vmw_send_define_cursor_cmd(dev_priv,
506 					   du->cursor_surface->snooper.image,
507 					   VMW_CURSOR_SNOOP_WIDTH,
508 					   VMW_CURSOR_SNOOP_HEIGHT,
509 					   du->hotspot_x + du->core_hotspot_x,
510 					   du->hotspot_y + du->core_hotspot_y);
511 	}
512 
513 	mutex_unlock(&dev->mode_config.mutex);
514 }
515 
516 
517 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
518 {
519 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
520 	u32 i;
521 
522 	vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
523 
524 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
525 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
526 
527 	drm_plane_cleanup(plane);
528 }
529 
530 
531 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
532 {
533 	drm_plane_cleanup(plane);
534 
535 	/* Planes are static in our case so we don't free them */
536 }
537 
538 
539 /**
540  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
541  *
542  * @vps: plane state associated with the display surface
543  */
544 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
545 {
546 	struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
547 
548 	if (surf) {
549 		if (vps->pinned) {
550 			vmw_resource_unpin(&surf->res);
551 			vps->pinned--;
552 		}
553 	}
554 }
555 
556 
557 /**
558  * vmw_du_plane_cleanup_fb - Unpins the plane surface
559  *
560  * @plane:  display plane
561  * @old_state: Contains the FB to clean up
562  *
563  * Unpins the framebuffer surface
564  *
565  * Returns 0 on success
566  */
567 void
568 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
569 			struct drm_plane_state *old_state)
570 {
571 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
572 
573 	vmw_du_plane_unpin_surf(vps);
574 }
575 
576 
577 /**
578  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
579  *
580  * @vps: plane_state
581  *
582  * Returns 0 on success
583  */
584 
585 static int
586 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
587 {
588 	int ret;
589 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
590 	struct ttm_buffer_object *bo;
591 
592 	if (!vps->cursor.bo)
593 		return -EINVAL;
594 
595 	bo = &vps->cursor.bo->tbo;
596 
597 	if (bo->base.size < size)
598 		return -EINVAL;
599 
600 	if (vps->cursor.bo->map.virtual)
601 		return 0;
602 
603 	ret = ttm_bo_reserve(bo, false, false, NULL);
604 	if (unlikely(ret != 0))
605 		return -ENOMEM;
606 
607 	vmw_bo_map_and_cache(vps->cursor.bo);
608 
609 	ttm_bo_unreserve(bo);
610 
611 	if (unlikely(ret != 0))
612 		return -ENOMEM;
613 
614 	return 0;
615 }
616 
617 
618 /**
619  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
620  *
621  * @vps: state of the cursor plane
622  *
623  * Returns 0 on success
624  */
625 
626 static int
627 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
628 {
629 	int ret = 0;
630 	struct vmw_bo *vbo = vps->cursor.bo;
631 
632 	if (!vbo || !vbo->map.virtual)
633 		return 0;
634 
635 	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
636 	if (likely(ret == 0)) {
637 		vmw_bo_unmap(vbo);
638 		ttm_bo_unreserve(&vbo->tbo);
639 	}
640 
641 	return ret;
642 }
643 
644 
645 /**
646  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
647  *
648  * @plane: cursor plane
649  * @old_state: contains the state to clean up
650  *
651  * Unmaps all cursor bo mappings and unpins the cursor surface
652  *
653  * Returns 0 on success
654  */
655 void
656 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
657 			       struct drm_plane_state *old_state)
658 {
659 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
660 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
661 
662 	if (!vmw_user_object_is_null(&vps->uo))
663 		vmw_user_object_unmap(&vps->uo);
664 
665 	vmw_du_cursor_plane_unmap_cm(vps);
666 	vmw_du_put_cursor_mob(vcp, vps);
667 
668 	vmw_du_plane_unpin_surf(vps);
669 	vmw_user_object_unref(&vps->uo);
670 }
671 
672 
673 /**
674  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
675  *
676  * @plane:  display plane
677  * @new_state: info on the new plane state, including the FB
678  *
679  * Returns 0 on success
680  */
681 int
682 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
683 			       struct drm_plane_state *new_state)
684 {
685 	struct drm_framebuffer *fb = new_state->fb;
686 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
687 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
688 	struct vmw_bo *bo = NULL;
689 	int ret = 0;
690 
691 	if (!vmw_user_object_is_null(&vps->uo)) {
692 		vmw_user_object_unmap(&vps->uo);
693 		vmw_user_object_unref(&vps->uo);
694 	}
695 
696 	if (fb) {
697 		if (vmw_framebuffer_to_vfb(fb)->bo) {
698 			vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
699 			vps->uo.surface = NULL;
700 		} else {
701 			memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
702 		}
703 		vmw_user_object_ref(&vps->uo);
704 	}
705 
706 	bo = vmw_user_object_buffer(&vps->uo);
707 	if (bo) {
708 		struct ttm_operation_ctx ctx = {false, false};
709 
710 		ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
711 		if (ret != 0)
712 			return -ENOMEM;
713 
714 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
715 		if (ret != 0)
716 			return -ENOMEM;
717 
718 		vmw_bo_pin_reserved(bo, true);
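		/*
		 * Buffer-object backed cursors only need the visible
		 * cursor-sized region mapped; surface-backed cursors map
		 * the whole backing buffer.
		 */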
719 		if (vmw_framebuffer_to_vfb(fb)->bo) {
720 			const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
721 
722 			(void)vmw_bo_map_and_cache_size(bo, size);
723 		} else {
724 			vmw_bo_map_and_cache(bo);
725 		}
726 		ttm_bo_unreserve(&bo->tbo);
727 	}
728 
729 	if (!vmw_user_object_is_null(&vps->uo)) {
730 		vmw_du_get_cursor_mob(vcp, vps);
731 		vmw_du_cursor_plane_map_cm(vps);
732 	}
733 
734 	return 0;
735 }
736 
737 
738 void
739 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
740 				  struct drm_atomic_state *state)
741 {
742 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
743 									   plane);
744 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
745 									   plane);
746 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
747 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
748 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
749 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
750 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
751 	struct vmw_bo *old_bo = NULL;
752 	struct vmw_bo *new_bo = NULL;
753 	struct ww_acquire_ctx ctx;
754 	s32 hotspot_x, hotspot_y;
755 	int ret;
756 
757 	hotspot_x = du->hotspot_x + new_state->hotspot_x;
758 	hotspot_y = du->hotspot_y + new_state->hotspot_y;
759 
760 	du->cursor_surface = vmw_user_object_surface(&vps->uo);
761 
762 	if (vmw_user_object_is_null(&vps->uo)) {
763 		vmw_cursor_update_position(dev_priv, false, 0, 0);
764 		return;
765 	}
766 
767 	vps->cursor.hotspot_x = hotspot_x;
768 	vps->cursor.hotspot_y = hotspot_y;
769 
770 	if (du->cursor_surface)
771 		du->cursor_age = du->cursor_surface->snooper.age;
772 
773 	ww_acquire_init(&ctx, &reservation_ww_class);
774 
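	/*
	 * Reserve both the old and the new cursor backing buffers under one
	 * ww acquire context so their contents can be compared and the new
	 * image safely read below.
	 */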
775 	if (!vmw_user_object_is_null(&old_vps->uo)) {
776 		old_bo = vmw_user_object_buffer(&old_vps->uo);
777 		ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
778 		if (ret != 0)
779 			return;
780 	}
781 
782 	if (!vmw_user_object_is_null(&vps->uo)) {
783 		new_bo = vmw_user_object_buffer(&vps->uo);
784 		if (old_bo != new_bo) {
785 			ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
786 			if (ret != 0) {
787 				if (old_bo) {
788 					ttm_bo_unreserve(&old_bo->tbo);
789 					ww_acquire_fini(&ctx);
790 				}
791 				return;
792 			}
793 		} else {
794 			new_bo = NULL;
795 		}
796 	}
797 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
798 		/*
799 		 * If it hasn't changed, avoid making the device do extra
800 		 * work by keeping the old cursor active.
801 		 */
802 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
803 		old_vps->cursor = vps->cursor;
804 		vps->cursor = tmp;
805 	} else {
806 		void *image = vmw_du_cursor_plane_acquire_image(vps);
807 		if (image)
808 			vmw_cursor_update_image(dev_priv, vps, image,
809 						new_state->crtc_w,
810 						new_state->crtc_h,
811 						hotspot_x, hotspot_y);
812 	}
813 
814 	if (new_bo)
815 		ttm_bo_unreserve(&new_bo->tbo);
816 	if (old_bo)
817 		ttm_bo_unreserve(&old_bo->tbo);
818 
819 	ww_acquire_fini(&ctx);
820 
821 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
822 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
823 
824 	vmw_cursor_update_position(dev_priv, true,
825 				   du->cursor_x + hotspot_x,
826 				   du->cursor_y + hotspot_y);
827 
828 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
829 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
830 }
831 
832 
833 /**
834  * vmw_du_primary_plane_atomic_check - check if the new state is okay
835  *
836  * @plane: display plane
837  * @state: info on the new plane state, including the FB
838  *
839  * Check if the new state is settable given the current state.  Other
840  * than what the atomic helper checks, we care about crtc fitting
841  * the FB and maintaining one active framebuffer.
842  *
843  * Returns 0 on success
844  */
845 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
846 				      struct drm_atomic_state *state)
847 {
848 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
849 									   plane);
850 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
851 									   plane);
852 	struct drm_crtc_state *crtc_state = NULL;
853 	struct drm_framebuffer *new_fb = new_state->fb;
854 	struct drm_framebuffer *old_fb = old_state->fb;
855 	int ret;
856 
857 	/*
858 	 * Ignore damage clips if the framebuffer attached to the plane's state
859 	 * has changed since the last plane update (page-flip). In this case, a
860 	 * full plane update should happen because uploads are done per-buffer.
861 	 */
862 	if (old_fb != new_fb)
863 		new_state->ignore_damage_clips = true;
864 
865 	if (new_state->crtc)
866 		crtc_state = drm_atomic_get_new_crtc_state(state,
867 							   new_state->crtc);
868 
869 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
870 						  DRM_PLANE_NO_SCALING,
871 						  DRM_PLANE_NO_SCALING,
872 						  false, true);
873 	return ret;
874 }
875 
876 
877 /**
878  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
879  *
880  * @plane: cursor plane
881  * @state: info on the new plane state
882  *
883  * This is a chance to fail if the new cursor state does not fit
884  * our requirements.
885  *
886  * Returns 0 on success
887  */
888 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
889 				     struct drm_atomic_state *state)
890 {
891 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
892 									   plane);
893 	int ret = 0;
894 	struct drm_crtc_state *crtc_state = NULL;
895 	struct vmw_surface *surface = NULL;
896 	struct drm_framebuffer *fb = new_state->fb;
897 
898 	if (new_state->crtc)
899 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
900 							   new_state->crtc);
901 
902 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
903 						  DRM_PLANE_NO_SCALING,
904 						  DRM_PLANE_NO_SCALING,
905 						  true, true);
906 	if (ret)
907 		return ret;
908 
909 	/* Turning off */
910 	if (!fb)
911 		return 0;
912 
913 	/* A lot of the code assumes this */
914 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
915 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
916 			  new_state->crtc_w, new_state->crtc_h);
917 		return -EINVAL;
918 	}
919 
920 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
921 		surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
922 
923 		WARN_ON(!surface);
924 
925 		if (!surface ||
926 		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
927 			DRM_ERROR("surface not suitable for cursor\n");
928 			return -EINVAL;
929 		}
930 	}
931 
932 	return 0;
933 }
934 
935 
936 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
937 			     struct drm_atomic_state *state)
938 {
939 	struct vmw_private *vmw = vmw_priv(crtc->dev);
940 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
941 									 crtc);
942 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
943 	int connector_mask = drm_connector_mask(&du->connector);
944 	bool has_primary = new_state->plane_mask &
945 			   drm_plane_mask(crtc->primary);
946 
947 	/*
948 	 * This is fine in general, but broken userspace might expect
949 	 * some actual rendering, so give a clue as to why it's blank.
950 	 */
951 	if (new_state->enable && !has_primary)
952 		drm_dbg_driver(&vmw->drm,
953 			       "CRTC without a primary plane will be blank.\n");
954 
955 
956 	if (new_state->connector_mask != connector_mask &&
957 	    new_state->connector_mask != 0) {
958 		DRM_ERROR("Invalid connectors configuration\n");
959 		return -EINVAL;
960 	}
961 
962 	/*
963 	 * Our virtual device does not have a dot clock, so use the logical
964 	 * clock value as the dot clock.
965 	 */
966 	if (new_state->mode.crtc_clock == 0)
967 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
968 
969 	return 0;
970 }
971 
972 
973 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
974 			      struct drm_atomic_state *state)
975 {
976 	vmw_vkms_crtc_atomic_begin(crtc, state);
977 }
978 
979 /**
980  * vmw_du_crtc_duplicate_state - duplicate crtc state
981  * @crtc: DRM crtc
982  *
983  * Allocates and returns a copy of the crtc state (both common and
984  * vmw-specific) for the specified crtc.
985  *
986  * Returns: The newly allocated crtc state, or NULL on failure.
987  */
988 struct drm_crtc_state *
989 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
990 {
991 	struct drm_crtc_state *state;
992 	struct vmw_crtc_state *vcs;
993 
994 	if (WARN_ON(!crtc->state))
995 		return NULL;
996 
997 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
998 
999 	if (!vcs)
1000 		return NULL;
1001 
1002 	state = &vcs->base;
1003 
1004 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1005 
1006 	return state;
1007 }
1008 
1009 
1010 /**
1011  * vmw_du_crtc_reset - creates a blank vmw crtc state
1012  * @crtc: DRM crtc
1013  *
1014  * Resets the atomic state for @crtc by freeing the state pointer (which
1015  * might be NULL, e.g. at driver load time) and allocating a new empty state
1016  * object.
1017  */
1018 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1019 {
1020 	struct vmw_crtc_state *vcs;
1021 
1022 
1023 	if (crtc->state) {
1024 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1025 
1026 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1027 	}
1028 
1029 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1030 
1031 	if (!vcs) {
1032 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1033 		return;
1034 	}
1035 
1036 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1037 }
1038 
1039 
1040 /**
1041  * vmw_du_crtc_destroy_state - destroy crtc state
1042  * @crtc: DRM crtc
1043  * @state: state object to destroy
1044  *
1045  * Destroys the crtc state (both common and vmw-specific) for the
1046  * specified plane.
1047  */
1048 void
1049 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1050 			  struct drm_crtc_state *state)
1051 {
1052 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1053 }
1054 
1055 
1056 /**
1057  * vmw_du_plane_duplicate_state - duplicate plane state
1058  * @plane: drm plane
1059  *
1060  * Allocates and returns a copy of the plane state (both common and
1061  * vmw-specific) for the specified plane.
1062  *
1063  * Returns: The newly allocated plane state, or NULL on failure.
1064  */
1065 struct drm_plane_state *
1066 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1067 {
1068 	struct drm_plane_state *state;
1069 	struct vmw_plane_state *vps;
1070 
1071 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1072 
1073 	if (!vps)
1074 		return NULL;
1075 
1076 	vps->pinned = 0;
1077 	vps->cpp = 0;
1078 
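	/*
	 * The cursor MOB and its mapping are not carried over; prepare_fb
	 * acquires a fresh one for the duplicated state.
	 */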
1079 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1080 
1081 	/* Each ref counted resource needs to be acquired again */
1082 	vmw_user_object_ref(&vps->uo);
1083 	state = &vps->base;
1084 
1085 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1086 
1087 	return state;
1088 }
1089 
1090 
1091 /**
1092  * vmw_du_plane_reset - creates a blank vmw plane state
1093  * @plane: drm plane
1094  *
1095  * Resets the atomic state for @plane by freeing the state pointer (which might
1096  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1097  */
1098 void vmw_du_plane_reset(struct drm_plane *plane)
1099 {
1100 	struct vmw_plane_state *vps;
1101 
1102 	if (plane->state)
1103 		vmw_du_plane_destroy_state(plane, plane->state);
1104 
1105 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1106 
1107 	if (!vps) {
1108 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1109 		return;
1110 	}
1111 
1112 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1113 }
1114 
1115 
1116 /**
1117  * vmw_du_plane_destroy_state - destroy plane state
1118  * @plane: DRM plane
1119  * @state: state object to destroy
1120  *
1121  * Destroys the plane state (both common and vmw-specific) for the
1122  * specified plane.
1123  */
1124 void
1125 vmw_du_plane_destroy_state(struct drm_plane *plane,
1126 			   struct drm_plane_state *state)
1127 {
1128 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1129 
1130 	/* Should have been freed by cleanup_fb */
1131 	vmw_user_object_unref(&vps->uo);
1132 
1133 	drm_atomic_helper_plane_destroy_state(plane, state);
1134 }
1135 
1136 
1137 /**
1138  * vmw_du_connector_duplicate_state - duplicate connector state
1139  * @connector: DRM connector
1140  *
1141  * Allocates and returns a copy of the connector state (both common and
1142  * vmw-specific) for the specified connector.
1143  *
1144  * Returns: The newly allocated connector state, or NULL on failure.
1145  */
1146 struct drm_connector_state *
1147 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1148 {
1149 	struct drm_connector_state *state;
1150 	struct vmw_connector_state *vcs;
1151 
1152 	if (WARN_ON(!connector->state))
1153 		return NULL;
1154 
1155 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1156 
1157 	if (!vcs)
1158 		return NULL;
1159 
1160 	state = &vcs->base;
1161 
1162 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1163 
1164 	return state;
1165 }
1166 
1167 
1168 /**
1169  * vmw_du_connector_reset - creates a blank vmw connector state
1170  * @connector: DRM connector
1171  *
1172  * Resets the atomic state for @connector by freeing the state pointer (which
1173  * might be NULL, e.g. at driver load time) and allocating a new empty state
1174  * object.
1175  */
1176 void vmw_du_connector_reset(struct drm_connector *connector)
1177 {
1178 	struct vmw_connector_state *vcs;
1179 
1180 
1181 	if (connector->state) {
1182 		__drm_atomic_helper_connector_destroy_state(connector->state);
1183 
1184 		kfree(vmw_connector_state_to_vcs(connector->state));
1185 	}
1186 
1187 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1188 
1189 	if (!vcs) {
1190 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1191 		return;
1192 	}
1193 
1194 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1195 }
1196 
1197 
1198 /**
1199  * vmw_du_connector_destroy_state - destroy connector state
1200  * @connector: DRM connector
1201  * @state: state object to destroy
1202  *
1203  * Destroys the connector state (both common and vmw-specific) for the
1204  * specified plane.
1205  */
1206 void
1207 vmw_du_connector_destroy_state(struct drm_connector *connector,
1208 			  struct drm_connector_state *state)
1209 {
1210 	drm_atomic_helper_connector_destroy_state(connector, state);
1211 }
1212 /*
1213  * Generic framebuffer code
1214  */
1215 
1216 /*
1217  * Surface framebuffer code
1218  */
1219 
1220 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1221 {
1222 	struct vmw_framebuffer_surface *vfbs =
1223 		vmw_framebuffer_to_vfbs(framebuffer);
1224 
1225 	drm_framebuffer_cleanup(framebuffer);
1226 	vmw_user_object_unref(&vfbs->uo);
1227 
1228 	kfree(vfbs);
1229 }
1230 
1231 /**
1232  * vmw_kms_readback - Perform a readback from the screen system to
1233  * a buffer-object backed framebuffer.
1234  *
1235  * @dev_priv: Pointer to the device private structure.
1236  * @file_priv: Pointer to a struct drm_file identifying the caller.
1237  * Must be set to NULL if @user_fence_rep is NULL.
1238  * @vfb: Pointer to the buffer-object backed framebuffer.
1239  * @user_fence_rep: User-space provided structure for fence information.
1240  * Must be set to non-NULL if @file_priv is non-NULL.
1241  * @vclips: Array of clip rects.
1242  * @num_clips: Number of clip rects in @vclips.
1243  *
1244  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1245  * interrupted.
1246  */
1247 int vmw_kms_readback(struct vmw_private *dev_priv,
1248 		     struct drm_file *file_priv,
1249 		     struct vmw_framebuffer *vfb,
1250 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1251 		     struct drm_vmw_rect *vclips,
1252 		     uint32_t num_clips)
1253 {
1254 	switch (dev_priv->active_display_unit) {
1255 	case vmw_du_screen_object:
1256 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1257 					    user_fence_rep, vclips, num_clips,
1258 					    NULL);
1259 	case vmw_du_screen_target:
1260 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1261 					     user_fence_rep, NULL, vclips, num_clips,
1262 					     1, NULL);
1263 	default:
1264 		WARN_ONCE(true,
1265 			  "Readback called with invalid display system.\n");
1266 	}
1267 
1268 	return -ENOSYS;
1269 }
1270 
1271 static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
1272 						 struct drm_file *file_priv,
1273 						 unsigned int *handle)
1274 {
1275 	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
1276 	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
1277 
1278 	if (WARN_ON(!bo))
1279 		return -EINVAL;
1280 	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
1281 }
1282 
1283 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1284 	.create_handle = vmw_framebuffer_surface_create_handle,
1285 	.destroy = vmw_framebuffer_surface_destroy,
1286 	.dirty = drm_atomic_helper_dirtyfb,
1287 };
1288 
1289 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1290 					   struct vmw_user_object *uo,
1291 					   struct vmw_framebuffer **out,
1292 					   const struct drm_mode_fb_cmd2
1293 					   *mode_cmd)
1294 
1295 {
1296 	struct drm_device *dev = &dev_priv->drm;
1297 	struct vmw_framebuffer_surface *vfbs;
1298 	struct vmw_surface *surface;
1299 	int ret;
1300 
1301 	/* 3D is only supported on HWv8 and newer hosts */
1302 	if (dev_priv->active_display_unit == vmw_du_legacy)
1303 		return -ENOSYS;
1304 
1305 	surface = vmw_user_object_surface(uo);
1306 
1307 	/*
1308 	 * Sanity checks.
1309 	 */
1310 
1311 	if (!drm_any_plane_has_format(&dev_priv->drm,
1312 				      mode_cmd->pixel_format,
1313 				      mode_cmd->modifier[0])) {
1314 		drm_dbg(&dev_priv->drm,
1315 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1316 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1317 		return -EINVAL;
1318 	}
1319 
1320 	/* Surface must be marked as a scanout. */
1321 	if (unlikely(!surface->metadata.scanout))
1322 		return -EINVAL;
1323 
1324 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1325 		     surface->metadata.num_sizes != 1 ||
1326 		     surface->metadata.base_size.width < mode_cmd->width ||
1327 		     surface->metadata.base_size.height < mode_cmd->height ||
1328 		     surface->metadata.base_size.depth != 1)) {
1329 		DRM_ERROR("Incompatible surface dimensions "
1330 			  "for requested mode.\n");
1331 		return -EINVAL;
1332 	}
1333 
1334 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1335 	if (!vfbs) {
1336 		ret = -ENOMEM;
1337 		goto out_err1;
1338 	}
1339 
1340 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1341 	memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
1342 	vmw_user_object_ref(&vfbs->uo);
1343 
1344 	*out = &vfbs->base;
1345 
1346 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1347 				   &vmw_framebuffer_surface_funcs);
1348 	if (ret)
1349 		goto out_err2;
1350 
1351 	return 0;
1352 
1353 out_err2:
1354 	vmw_user_object_unref(&vfbs->uo);
1355 	kfree(vfbs);
1356 out_err1:
1357 	return ret;
1358 }
1359 
1360 /*
1361  * Buffer-object framebuffer code
1362  */
1363 
1364 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1365 					    struct drm_file *file_priv,
1366 					    unsigned int *handle)
1367 {
1368 	struct vmw_framebuffer_bo *vfbd =
1369 			vmw_framebuffer_to_vfbd(fb);
1370 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1371 }
1372 
1373 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1374 {
1375 	struct vmw_framebuffer_bo *vfbd =
1376 		vmw_framebuffer_to_vfbd(framebuffer);
1377 
1378 	drm_framebuffer_cleanup(framebuffer);
1379 	vmw_bo_unreference(&vfbd->buffer);
1380 
1381 	kfree(vfbd);
1382 }
1383 
1384 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1385 	.create_handle = vmw_framebuffer_bo_create_handle,
1386 	.destroy = vmw_framebuffer_bo_destroy,
1387 	.dirty = drm_atomic_helper_dirtyfb,
1388 };
1389 
1390 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1391 				      struct vmw_bo *bo,
1392 				      struct vmw_framebuffer **out,
1393 				      const struct drm_mode_fb_cmd2
1394 				      *mode_cmd)
1395 
1396 {
1397 	struct drm_device *dev = &dev_priv->drm;
1398 	struct vmw_framebuffer_bo *vfbd;
1399 	unsigned int requested_size;
1400 	int ret;
1401 
1402 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1403 	if (unlikely(requested_size > bo->tbo.base.size)) {
1404 		DRM_ERROR("Screen buffer object size is too small "
1405 			  "for requested mode.\n");
1406 		return -EINVAL;
1407 	}
1408 
1409 	if (!drm_any_plane_has_format(&dev_priv->drm,
1410 				      mode_cmd->pixel_format,
1411 				      mode_cmd->modifier[0])) {
1412 		drm_dbg(&dev_priv->drm,
1413 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1414 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1415 		return -EINVAL;
1416 	}
1417 
1418 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1419 	if (!vfbd) {
1420 		ret = -ENOMEM;
1421 		goto out_err1;
1422 	}
1423 
1424 	vfbd->base.base.obj[0] = &bo->tbo.base;
1425 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1426 	vfbd->base.bo = true;
1427 	vfbd->buffer = vmw_bo_reference(bo);
1428 	*out = &vfbd->base;
1429 
1430 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1431 				   &vmw_framebuffer_bo_funcs);
1432 	if (ret)
1433 		goto out_err2;
1434 
1435 	return 0;
1436 
1437 out_err2:
1438 	vmw_bo_unreference(&bo);
1439 	kfree(vfbd);
1440 out_err1:
1441 	return ret;
1442 }
1443 
1444 
1445 /**
1446  * vmw_kms_srf_ok - check if a surface can be created
1447  *
1448  * @dev_priv: Pointer to device private struct.
1449  * @width: requested width
1450  * @height: requested height
1451  *
1452  * Surfaces must not exceed the maximum texture dimensions.
1453  */
1454 static bool
1455 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1456 {
1457 	if (width  > dev_priv->texture_max_width ||
1458 	    height > dev_priv->texture_max_height)
1459 		return false;
1460 
1461 	return true;
1462 }
1463 
1464 /**
1465  * vmw_kms_new_framebuffer - Create a new framebuffer.
1466  *
1467  * @dev_priv: Pointer to device private struct.
1468  * @uo: Pointer to user object to wrap the kms framebuffer around.
1469  * Either the buffer or surface inside the user object must be NULL.
1470  * @mode_cmd: Frame-buffer metadata.
1471  */
1472 struct vmw_framebuffer *
1473 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1474 			struct vmw_user_object *uo,
1475 			const struct drm_mode_fb_cmd2 *mode_cmd)
1476 {
1477 	struct vmw_framebuffer *vfb = NULL;
1478 	int ret;
1479 
1480 	/* Create the new framebuffer depending on what we have */
1481 	if (vmw_user_object_surface(uo)) {
1482 		ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
1483 						      mode_cmd);
1484 	} else if (uo->buffer) {
1485 		ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
1486 						 mode_cmd);
1487 	} else {
1488 		BUG();
1489 	}
1490 
1491 	if (ret)
1492 		return ERR_PTR(ret);
1493 
1494 	return vfb;
1495 }
1496 
1497 /*
1498  * Generic Kernel modesetting functions
1499  */
1500 
1501 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1502 						 struct drm_file *file_priv,
1503 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1504 {
1505 	struct vmw_private *dev_priv = vmw_priv(dev);
1506 	struct vmw_framebuffer *vfb = NULL;
1507 	struct vmw_user_object uo = {0};
1508 	int ret;
1509 
1510 	/* returns either a bo or surface */
1511 	ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
1512 				     &uo);
1513 	if (ret) {
1514 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1515 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1516 		goto err_out;
1517 	}
1518 
1519 
1520 	if (vmw_user_object_surface(&uo) &&
1521 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1522 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1523 			dev_priv->texture_max_width,
1524 			dev_priv->texture_max_height);
1525 		ret = -EINVAL;
1526 		goto err_out;
1527 	}
1528 
1529 
1530 	vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
1531 	if (IS_ERR(vfb)) {
1532 		ret = PTR_ERR(vfb);
1533 		goto err_out;
1534 	}
1535 
1536 err_out:
1537 	/* vmw_user_object_lookup takes one ref; so does the new framebuffer */
1538 	vmw_user_object_unref(&uo);
1539 
1540 	if (ret) {
1541 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1542 		return ERR_PTR(ret);
1543 	}
1544 
1545 	return &vfb->base;
1546 }
1547 
1548 /**
1549  * vmw_kms_check_display_memory - Validates display memory required for a
1550  * topology
1551  * @dev: DRM device
1552  * @num_rects: number of drm_rect in rects
1553  * @rects: array of drm_rect representing the topology to validate indexed by
1554  * crtc index.
1555  *
1556  * Returns:
1557  * 0 on success otherwise negative error code
1558  */
1559 static int vmw_kms_check_display_memory(struct drm_device *dev,
1560 					uint32_t num_rects,
1561 					struct drm_rect *rects)
1562 {
1563 	struct vmw_private *dev_priv = vmw_priv(dev);
1564 	struct drm_rect bounding_box = {0};
1565 	u64 total_pixels = 0, pixel_mem, bb_mem;
1566 	int i;
1567 
1568 	for (i = 0; i < num_rects; i++) {
1569 		/*
1570 		 * For STDU only individual screen (screen target) is limited by
1571 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1572 		 */
1573 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1574 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1575 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1576 			VMW_DEBUG_KMS("Screen size not supported.\n");
1577 			return -EINVAL;
1578 		}
1579 
1580 		/* Bounding box upper left is at (0,0). */
1581 		if (rects[i].x2 > bounding_box.x2)
1582 			bounding_box.x2 = rects[i].x2;
1583 
1584 		if (rects[i].y2 > bounding_box.y2)
1585 			bounding_box.y2 = rects[i].y2;
1586 
1587 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1588 			(u64) drm_rect_height(&rects[i]);
1589 	}
1590 
1591 	/* Virtual svga device primary limits are always in 32-bpp. */
1592 	pixel_mem = total_pixels * 4;
1593 
1594 	/*
1595 	 * For HV10 and below, prim_bb_mem is the VRAM size. When
1596 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
1597 	 * size is the limit on the primary bounding box.
1598 	 */
1599 	if (pixel_mem > dev_priv->max_primary_mem) {
1600 		VMW_DEBUG_KMS("Combined output size too large.\n");
1601 		return -EINVAL;
1602 	}
1603 
1604 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1605 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1606 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1607 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1608 
1609 		if (bb_mem > dev_priv->max_primary_mem) {
1610 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1611 			return -EINVAL;
1612 		}
1613 	}
1614 
1615 	return 0;
1616 }
1617 
1618 /**
1619  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1620  * crtc mutex
1621  * @state: The atomic state pointer containing the new atomic state
1622  * @crtc: The crtc
1623  *
1624  * This function returns the new crtc state if it's part of the state update.
1625  * Otherwise returns the current crtc state. It also makes sure that the
1626  * crtc mutex is locked.
1627  *
1628  * Returns: A valid crtc state pointer or NULL. It may also return a
1629  * Returns: A valid crtc state pointer or NULL. It may also return an
1630  * error pointer, in particular -EDEADLK if locking needs to be rerun.
1631 static struct drm_crtc_state *
1632 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1633 {
1634 	struct drm_crtc_state *crtc_state;
1635 
1636 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1637 	if (crtc_state) {
1638 		lockdep_assert_held(&crtc->mutex.mutex.base);
1639 	} else {
1640 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1641 
1642 		if (ret != 0 && ret != -EALREADY)
1643 			return ERR_PTR(ret);
1644 
1645 		crtc_state = crtc->state;
1646 	}
1647 
1648 	return crtc_state;
1649 }
1650 
1651 /**
1652  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1653  * from the same fb after the new state is committed.
1654  * @dev: The drm_device.
1655  * @state: The new state to be checked.
1656  *
1657  * Returns:
1658  *   Zero on success,
1659  *   -EINVAL on invalid state,
1660  *   -EDEADLK if modeset locking needs to be rerun.
1661  */
1662 static int vmw_kms_check_implicit(struct drm_device *dev,
1663 				  struct drm_atomic_state *state)
1664 {
1665 	struct drm_framebuffer *implicit_fb = NULL;
1666 	struct drm_crtc *crtc;
1667 	struct drm_crtc_state *crtc_state;
1668 	struct drm_plane_state *plane_state;
1669 
1670 	drm_for_each_crtc(crtc, dev) {
1671 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1672 
1673 		if (!du->is_implicit)
1674 			continue;
1675 
1676 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1677 		if (IS_ERR(crtc_state))
1678 			return PTR_ERR(crtc_state);
1679 
1680 		if (!crtc_state || !crtc_state->enable)
1681 			continue;
1682 
1683 		/*
1684 		 * Can't move primary planes across crtcs, so this is OK.
1685 		 * It also means we don't need to take the plane mutex.
1686 		 */
1687 		plane_state = du->primary.state;
1688 		if (plane_state->crtc != crtc)
1689 			continue;
1690 
1691 		if (!implicit_fb)
1692 			implicit_fb = plane_state->fb;
1693 		else if (implicit_fb != plane_state->fb)
1694 			return -EINVAL;
1695 	}
1696 
1697 	return 0;
1698 }
1699 
1700 /**
1701  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1702  * @dev: DRM device
1703  * @state: the driver state object
1704  *
1705  * Returns:
1706  * 0 on success otherwise negative error code
1707  */
1708 static int vmw_kms_check_topology(struct drm_device *dev,
1709 				  struct drm_atomic_state *state)
1710 {
1711 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1712 	struct drm_rect *rects;
1713 	struct drm_crtc *crtc;
1714 	uint32_t i;
1715 	int ret = 0;
1716 
1717 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1718 			GFP_KERNEL);
1719 	if (!rects)
1720 		return -ENOMEM;
1721 
1722 	drm_for_each_crtc(crtc, dev) {
1723 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1724 		struct drm_crtc_state *crtc_state;
1725 
1726 		i = drm_crtc_index(crtc);
1727 
1728 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1729 		if (IS_ERR(crtc_state)) {
1730 			ret = PTR_ERR(crtc_state);
1731 			goto clean;
1732 		}
1733 
1734 		if (!crtc_state)
1735 			continue;
1736 
1737 		if (crtc_state->enable) {
1738 			rects[i].x1 = du->gui_x;
1739 			rects[i].y1 = du->gui_y;
1740 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1741 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1742 		} else {
1743 			rects[i].x1 = 0;
1744 			rects[i].y1 = 0;
1745 			rects[i].x2 = 0;
1746 			rects[i].y2 = 0;
1747 		}
1748 	}
1749 
1750 	/* Determine change to topology due to new atomic state */
1751 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1752 				      new_crtc_state, i) {
1753 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1754 		struct drm_connector *connector;
1755 		struct drm_connector_state *conn_state;
1756 		struct vmw_connector_state *vmw_conn_state;
1757 
1758 		if (!du->pref_active && new_crtc_state->enable) {
1759 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1760 			ret = -EINVAL;
1761 			goto clean;
1762 		}
1763 
1764 		/*
1765 		 * For vmwgfx each crtc has only one connector attached and it
1766 		 * never changes, so we don't really need to check the
1767 		 * crtc->connector_mask and iterate over it.
1768 		 */
1769 		connector = &du->connector;
1770 		conn_state = drm_atomic_get_connector_state(state, connector);
1771 		if (IS_ERR(conn_state)) {
1772 			ret = PTR_ERR(conn_state);
1773 			goto clean;
1774 		}
1775 
1776 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1777 		vmw_conn_state->gui_x = du->gui_x;
1778 		vmw_conn_state->gui_y = du->gui_y;
1779 	}
1780 
1781 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1782 					   rects);
1783 
1784 clean:
1785 	kfree(rects);
1786 	return ret;
1787 }
1788 
1789 /**
1790  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1791  *
1792  * @dev: DRM device
1793  * @state: the driver state object
1794  *
1795  * This is a wrapper around drm_atomic_helper_check() that additionally
1796  * checks implicit framebuffer sharing and, when a modeset is requested,
1797  * validates the resulting display topology.
1798  *
1799  * Returns:
1800  * Zero for success or -errno
1801  */
1802 static int
vmw_kms_atomic_check_modeset(struct drm_device * dev,struct drm_atomic_state * state)1803 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1804 			     struct drm_atomic_state *state)
1805 {
1806 	struct drm_crtc *crtc;
1807 	struct drm_crtc_state *crtc_state;
1808 	bool need_modeset = false;
1809 	int i, ret;
1810 
1811 	ret = drm_atomic_helper_check(dev, state);
1812 	if (ret)
1813 		return ret;
1814 
1815 	ret = vmw_kms_check_implicit(dev, state);
1816 	if (ret) {
1817 		VMW_DEBUG_KMS("Invalid implicit state\n");
1818 		return ret;
1819 	}
1820 
1821 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1822 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1823 			need_modeset = true;
1824 	}
1825 
1826 	if (need_modeset)
1827 		return vmw_kms_check_topology(dev, state);
1828 
1829 	return ret;
1830 }
1831 
1832 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1833 	.fb_create = vmw_kms_fb_create,
1834 	.atomic_check = vmw_kms_atomic_check_modeset,
1835 	.atomic_commit = drm_atomic_helper_commit,
1836 };
1837 
vmw_kms_generic_present(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_framebuffer * vfb,struct vmw_surface * surface,uint32_t sid,int32_t destX,int32_t destY,struct drm_vmw_rect * clips,uint32_t num_clips)1838 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1839 				   struct drm_file *file_priv,
1840 				   struct vmw_framebuffer *vfb,
1841 				   struct vmw_surface *surface,
1842 				   uint32_t sid,
1843 				   int32_t destX, int32_t destY,
1844 				   struct drm_vmw_rect *clips,
1845 				   uint32_t num_clips)
1846 {
1847 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1848 					    &surface->res, destX, destY,
1849 					    num_clips, 1, NULL, NULL);
1850 }
1851 
1852 
vmw_kms_present(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_framebuffer * vfb,struct vmw_surface * surface,uint32_t sid,int32_t destX,int32_t destY,struct drm_vmw_rect * clips,uint32_t num_clips)1853 int vmw_kms_present(struct vmw_private *dev_priv,
1854 		    struct drm_file *file_priv,
1855 		    struct vmw_framebuffer *vfb,
1856 		    struct vmw_surface *surface,
1857 		    uint32_t sid,
1858 		    int32_t destX, int32_t destY,
1859 		    struct drm_vmw_rect *clips,
1860 		    uint32_t num_clips)
1861 {
1862 	int ret;
1863 
1864 	switch (dev_priv->active_display_unit) {
1865 	case vmw_du_screen_target:
1866 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1867 						 &surface->res, destX, destY,
1868 						 num_clips, 1, NULL, NULL);
1869 		break;
1870 	case vmw_du_screen_object:
1871 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1872 					      sid, destX, destY, clips,
1873 					      num_clips);
1874 		break;
1875 	default:
1876 		WARN_ONCE(true,
1877 			  "Present called with invalid display system.\n");
1878 		ret = -ENOSYS;
1879 		break;
1880 	}
1881 	if (ret)
1882 		return ret;
1883 
1884 	vmw_cmd_flush(dev_priv, false);
1885 
1886 	return 0;
1887 }
1888 
1889 static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private * dev_priv)1890 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1891 {
1892 	if (dev_priv->hotplug_mode_update_property)
1893 		return;
1894 
1895 	dev_priv->hotplug_mode_update_property =
1896 		drm_property_create_range(&dev_priv->drm,
1897 					  DRM_MODE_PROP_IMMUTABLE,
1898 					  "hotplug_mode_update", 0, 1);
1899 }
1900 
1901 static void
vmw_atomic_commit_tail(struct drm_atomic_state * old_state)1902 vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
1903 {
1904 	struct vmw_private *vmw = vmw_priv(old_state->dev);
1905 	struct drm_crtc *crtc;
1906 	struct drm_crtc_state *old_crtc_state;
1907 	int i;
1908 
1909 	drm_atomic_helper_commit_tail(old_state);
1910 
1911 	if (vmw->vkms_enabled) {
1912 		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1913 			struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1914 			(void)old_crtc_state;
1915 			flush_work(&du->vkms.crc_generator_work);
1916 		}
1917 	}
1918 }
1919 
1920 static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
1921 	.atomic_commit_tail = vmw_atomic_commit_tail,
1922 };
1923 
vmw_kms_init(struct vmw_private * dev_priv)1924 int vmw_kms_init(struct vmw_private *dev_priv)
1925 {
1926 	struct drm_device *dev = &dev_priv->drm;
1927 	int ret;
1928 	static const char *display_unit_names[] = {
1929 		"Invalid",
1930 		"Legacy",
1931 		"Screen Object",
1932 		"Screen Target",
1933 		"Invalid (max)"
1934 	};
1935 
1936 	drm_mode_config_init(dev);
1937 	dev->mode_config.funcs = &vmw_kms_funcs;
1938 	dev->mode_config.min_width = 1;
1939 	dev->mode_config.min_height = 1;
1940 	dev->mode_config.max_width = dev_priv->texture_max_width;
1941 	dev->mode_config.max_height = dev_priv->texture_max_height;
1942 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
1943 	dev->mode_config.helper_private = &vmw_mode_config_helpers;
1944 
1945 	drm_mode_create_suggested_offset_properties(dev);
1946 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1947 
1948 	ret = vmw_kms_stdu_init_display(dev_priv);
1949 	if (ret) {
1950 		ret = vmw_kms_sou_init_display(dev_priv);
1951 		if (ret) /* Fallback */
1952 			ret = vmw_kms_ldu_init_display(dev_priv);
1953 	}
1954 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
1955 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
1956 		 display_unit_names[dev_priv->active_display_unit]);
1957 
1958 	return ret;
1959 }
1960 
vmw_kms_close(struct vmw_private * dev_priv)1961 int vmw_kms_close(struct vmw_private *dev_priv)
1962 {
1963 	int ret = 0;
1964 
1965 	/*
1966 	 * The docs say we should take the lock before calling this function,
1967 	 * but since it destroys encoders and our destructor calls
1968 	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
1969 	 */
1970 	drm_mode_config_cleanup(&dev_priv->drm);
1971 	if (dev_priv->active_display_unit == vmw_du_legacy)
1972 		ret = vmw_kms_ldu_close_display(dev_priv);
1973 
1974 	return ret;
1975 }
1976 
vmw_kms_cursor_bypass_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)1977 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1978 				struct drm_file *file_priv)
1979 {
1980 	struct drm_vmw_cursor_bypass_arg *arg = data;
1981 	struct vmw_display_unit *du;
1982 	struct drm_crtc *crtc;
1983 	int ret = 0;
1984 
1985 	mutex_lock(&dev->mode_config.mutex);
1986 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1987 
1988 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1989 			du = vmw_crtc_to_du(crtc);
1990 			du->hotspot_x = arg->xhot;
1991 			du->hotspot_y = arg->yhot;
1992 		}
1993 
1994 		mutex_unlock(&dev->mode_config.mutex);
1995 		return 0;
1996 	}
1997 
1998 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1999 	if (!crtc) {
2000 		ret = -ENOENT;
2001 		goto out;
2002 	}
2003 
2004 	du = vmw_crtc_to_du(crtc);
2005 
2006 	du->hotspot_x = arg->xhot;
2007 	du->hotspot_y = arg->yhot;
2008 
2009 out:
2010 	mutex_unlock(&dev->mode_config.mutex);
2011 
2012 	return ret;
2013 }
2014 
vmw_kms_write_svga(struct vmw_private * vmw_priv,unsigned width,unsigned height,unsigned pitch,unsigned bpp,unsigned depth)2015 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2016 			unsigned width, unsigned height, unsigned pitch,
2017 			unsigned bpp, unsigned depth)
2018 {
2019 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2020 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2021 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2022 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2023 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2024 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2025 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2026 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2027 
2028 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2029 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2030 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2031 		return -EINVAL;
2032 	}
2033 
2034 	return 0;
2035 }
2036 
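/*
 * Illustrative sketch, not actual driver code: programming a linear
 * 1280x800, 32 bpp mode, where the pitch is simply width times bytes per
 * pixel. The mode values and the assumed host depth of 24 are made up
 * for this example.
 */
static int vmw_example_write_svga(struct vmw_private *vmw_priv)
{
	unsigned width = 1280, height = 800, bpp = 32, depth = 24;
	unsigned pitch = width * (bpp / 8);	/* 5120 bytes per scanline */

	return vmw_kms_write_svga(vmw_priv, width, height, pitch, bpp, depth);
}
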
2037 static
vmw_kms_validate_mode_vram(struct vmw_private * dev_priv,u64 pitch,u64 height)2038 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2039 				u64 pitch,
2040 				u64 height)
2041 {
2042 	return (pitch * height) < (u64)dev_priv->vram_size;
2043 }
2044 
2045 /**
2046  * vmw_du_update_layout - Update the display unit with topology from resolution
2047  * plugin and generate DRM uevent
2048  * @dev_priv: device private
2049  * @num_rects: number of drm_rect entries in @rects
2050  * @rects: topology to update
2051  */
vmw_du_update_layout(struct vmw_private * dev_priv,unsigned int num_rects,struct drm_rect * rects)2052 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2053 				unsigned int num_rects, struct drm_rect *rects)
2054 {
2055 	struct drm_device *dev = &dev_priv->drm;
2056 	struct vmw_display_unit *du;
2057 	struct drm_connector *con;
2058 	struct drm_connector_list_iter conn_iter;
2059 	struct drm_modeset_acquire_ctx ctx;
2060 	struct drm_crtc *crtc;
2061 	int ret;
2062 
2063 	/* Currently gui_x/y is protected with the crtc mutex */
2064 	mutex_lock(&dev->mode_config.mutex);
2065 	drm_modeset_acquire_init(&ctx, 0);
2066 retry:
2067 	drm_for_each_crtc(crtc, dev) {
2068 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2069 		if (ret < 0) {
2070 			if (ret == -EDEADLK) {
2071 				drm_modeset_backoff(&ctx);
2072 				goto retry;
2073 			}
2074 			goto out_fini;
2075 		}
2076 	}
2077 
2078 	drm_connector_list_iter_begin(dev, &conn_iter);
2079 	drm_for_each_connector_iter(con, &conn_iter) {
2080 		du = vmw_connector_to_du(con);
2081 		if (num_rects > du->unit) {
2082 			du->pref_width = drm_rect_width(&rects[du->unit]);
2083 			du->pref_height = drm_rect_height(&rects[du->unit]);
2084 			du->pref_active = true;
2085 			du->gui_x = rects[du->unit].x1;
2086 			du->gui_y = rects[du->unit].y1;
2087 		} else {
2088 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2089 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2090 			du->pref_active = false;
2091 			du->gui_x = 0;
2092 			du->gui_y = 0;
2093 		}
2094 	}
2095 	drm_connector_list_iter_end(&conn_iter);
2096 
2097 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2098 		du = vmw_connector_to_du(con);
2099 		if (num_rects > du->unit) {
2100 			drm_object_property_set_value
2101 			  (&con->base, dev->mode_config.suggested_x_property,
2102 			   du->gui_x);
2103 			drm_object_property_set_value
2104 			  (&con->base, dev->mode_config.suggested_y_property,
2105 			   du->gui_y);
2106 		} else {
2107 			drm_object_property_set_value
2108 			  (&con->base, dev->mode_config.suggested_x_property,
2109 			   0);
2110 			drm_object_property_set_value
2111 			  (&con->base, dev->mode_config.suggested_y_property,
2112 			   0);
2113 		}
2114 		con->status = vmw_du_connector_detect(con, true);
2115 	}
2116 out_fini:
2117 	drm_modeset_drop_locks(&ctx);
2118 	drm_modeset_acquire_fini(&ctx);
2119 	mutex_unlock(&dev->mode_config.mutex);
2120 
2121 	drm_sysfs_hotplug_event(dev);
2122 
2123 	return 0;
2124 }
2125 
vmw_du_crtc_gamma_set(struct drm_crtc * crtc,u16 * r,u16 * g,u16 * b,uint32_t size,struct drm_modeset_acquire_ctx * ctx)2126 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2127 			  u16 *r, u16 *g, u16 *b,
2128 			  uint32_t size,
2129 			  struct drm_modeset_acquire_ctx *ctx)
2130 {
2131 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2132 	int i;
2133 
2134 	for (i = 0; i < size; i++) {
2135 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2136 			  r[i], g[i], b[i]);
2137 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2138 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2139 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2140 	}
2141 
2142 	return 0;
2143 }
2144 
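/*
 * Worked example for the palette layout above: with a 256-entry gamma
 * table, entry 0 occupies SVGA_PALETTE_BASE + 0..2 (R, G, B) and entry
 * 255 occupies SVGA_PALETTE_BASE + 765..767; the 16-bit DRM gamma values
 * are truncated to 8 bits by the >> 8 shift, so 0xffff is written as 0xff.
 */
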
vmw_du_connector_dpms(struct drm_connector * connector,int mode)2145 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2146 {
2147 	return 0;
2148 }
2149 
2150 enum drm_connector_status
vmw_du_connector_detect(struct drm_connector * connector,bool force)2151 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2152 {
2153 	uint32_t num_displays;
2154 	struct drm_device *dev = connector->dev;
2155 	struct vmw_private *dev_priv = vmw_priv(dev);
2156 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2157 
2158 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2159 
2160 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2161 		 du->pref_active) ?
2162 		connector_status_connected : connector_status_disconnected);
2163 }
2164 
2165 /**
2166  * vmw_guess_mode_timing - Provide fake timings for a
2167  * 60Hz vrefresh mode.
2168  *
2169  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2170  * members filled in.
2171  */
vmw_guess_mode_timing(struct drm_display_mode * mode)2172 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2173 {
2174 	mode->hsync_start = mode->hdisplay + 50;
2175 	mode->hsync_end = mode->hsync_start + 50;
2176 	mode->htotal = mode->hsync_end + 50;
2177 
2178 	mode->vsync_start = mode->vdisplay + 50;
2179 	mode->vsync_end = mode->vsync_start + 50;
2180 	mode->vtotal = mode->vsync_end + 50;
2181 
2182 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2183 }
2184 
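/*
 * Worked example for the timing guess above: for a 1024x768 mode,
 * htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918, so
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz, which corresponds to
 * 64662000 / (1174 * 918) ~= 60 Hz vrefresh.
 */
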
2185 
2186 /**
2187  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2188  * @dev: drm device for the ioctl
2189  * @data: data pointer for the ioctl
2190  * @file_priv: drm file for the ioctl call
2191  *
2192  * Update the preferred topology of the display units as per the ioctl
2193  * request. The topology is expressed as an array of drm_vmw_rect.
2194  * e.g.
2195  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2196  *
2197  * NOTE:
2198  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2199  * Besides the device limits on topology, x + w and y + h (lower right)
2200  * cannot exceed INT_MAX, so a topology beyond these limits is rejected.
2201  *
2202  * Returns:
2203  * Zero on success, negative errno on failure.
2204  */
vmw_kms_update_layout_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)2205 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2206 				struct drm_file *file_priv)
2207 {
2208 	struct vmw_private *dev_priv = vmw_priv(dev);
2209 	struct drm_mode_config *mode_config = &dev->mode_config;
2210 	struct drm_vmw_update_layout_arg *arg =
2211 		(struct drm_vmw_update_layout_arg *)data;
2212 	const void __user *user_rects;
2213 	struct drm_vmw_rect *rects;
2214 	struct drm_rect *drm_rects;
2215 	unsigned rects_size;
2216 	int ret, i;
2217 
2218 	if (!arg->num_outputs) {
2219 		struct drm_rect def_rect = {0, 0,
2220 					    VMWGFX_MIN_INITIAL_WIDTH,
2221 					    VMWGFX_MIN_INITIAL_HEIGHT};
2222 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2223 		return 0;
2224 	} else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
2225 		return -E2BIG;
2226 	}
2227 
2228 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2229 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2230 			GFP_KERNEL);
2231 	if (unlikely(!rects))
2232 		return -ENOMEM;
2233 
2234 	user_rects = (void __user *)(unsigned long)arg->rects;
2235 	ret = copy_from_user(rects, user_rects, rects_size);
2236 	if (unlikely(ret != 0)) {
2237 		DRM_ERROR("Failed to get rects.\n");
2238 		ret = -EFAULT;
2239 		goto out_free;
2240 	}
2241 
2242 	drm_rects = (struct drm_rect *)rects;
2243 
2244 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2245 	for (i = 0; i < arg->num_outputs; i++) {
2246 		struct drm_vmw_rect curr_rect;
2247 
2248 		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2249 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2250 		    (rects[i].y + rects[i].h > INT_MAX)) {
2251 			ret = -ERANGE;
2252 			goto out_free;
2253 		}
2254 
2255 		curr_rect = rects[i];
2256 		drm_rects[i].x1 = curr_rect.x;
2257 		drm_rects[i].y1 = curr_rect.y;
2258 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2259 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2260 
2261 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2262 			      drm_rects[i].x1, drm_rects[i].y1,
2263 			      drm_rects[i].x2, drm_rects[i].y2);
2264 
2265 		/*
2266 		 * Currently this check limits the topology to
2267 		 * mode_config->max_width/height (which is the maximum texture
2268 		 * size supported by the virtual device). The limit is here to
2269 		 * accommodate window managers that create one big framebuffer
2270 		 * spanning the whole topology.
2271 		 */
2272 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2273 		    drm_rects[i].x2 > mode_config->max_width ||
2274 		    drm_rects[i].y2 > mode_config->max_height) {
2275 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2276 				      drm_rects[i].x1, drm_rects[i].y1,
2277 				      drm_rects[i].x2, drm_rects[i].y2);
2278 			ret = -EINVAL;
2279 			goto out_free;
2280 		}
2281 	}
2282 
2283 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2284 
2285 	if (ret == 0)
2286 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2287 
2288 out_free:
2289 	kfree(rects);
2290 	return ret;
2291 }
2292 
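/*
 * Worked example for the conversion above: a drm_vmw_rect of
 * {x = 640, y = 0, w = 800, h = 600} becomes the drm_rect
 * {x1 = 640, y1 = 0, x2 = 1440, y2 = 600}, i.e. an 800x600 output placed
 * to the right of a 640-pixel-wide one, matching the layout example in
 * the kernel-doc of vmw_kms_update_layout_ioctl().
 */
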
2293 /**
2294  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2295  * on a set of cliprects and a set of display units.
2296  *
2297  * @dev_priv: Pointer to a device private structure.
2298  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2299  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2300  * Cliprects are given in framebuffer coordinates.
2301  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2302  * be NULL. Cliprects are given in source coordinates.
2303  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2304  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2305  * @num_clips: Number of cliprects in the @clips or @vclips array.
2306  * @increment: Integer with which to increment the clip counter when looping.
2307  * Used to skip a predetermined number of clip rects.
2308  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2309  */
vmw_kms_helper_dirty(struct vmw_private * dev_priv,struct vmw_framebuffer * framebuffer,const struct drm_clip_rect * clips,const struct drm_vmw_rect * vclips,s32 dest_x,s32 dest_y,int num_clips,int increment,struct vmw_kms_dirty * dirty)2310 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2311 			 struct vmw_framebuffer *framebuffer,
2312 			 const struct drm_clip_rect *clips,
2313 			 const struct drm_vmw_rect *vclips,
2314 			 s32 dest_x, s32 dest_y,
2315 			 int num_clips,
2316 			 int increment,
2317 			 struct vmw_kms_dirty *dirty)
2318 {
2319 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2320 	struct drm_crtc *crtc;
2321 	u32 num_units = 0;
2322 	u32 i, k;
2323 
2324 	dirty->dev_priv = dev_priv;
2325 
2326 	/* If crtc is passed, no need to iterate over other display units */
2327 	if (dirty->crtc) {
2328 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2329 	} else {
2330 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2331 				    head) {
2332 			struct drm_plane *plane = crtc->primary;
2333 
2334 			if (plane->state->fb == &framebuffer->base)
2335 				units[num_units++] = vmw_crtc_to_du(crtc);
2336 		}
2337 	}
2338 
2339 	for (k = 0; k < num_units; k++) {
2340 		struct vmw_display_unit *unit = units[k];
2341 		s32 crtc_x = unit->crtc.x;
2342 		s32 crtc_y = unit->crtc.y;
2343 		s32 crtc_width = unit->crtc.mode.hdisplay;
2344 		s32 crtc_height = unit->crtc.mode.vdisplay;
2345 		const struct drm_clip_rect *clips_ptr = clips;
2346 		const struct drm_vmw_rect *vclips_ptr = vclips;
2347 
2348 		dirty->unit = unit;
2349 		if (dirty->fifo_reserve_size > 0) {
2350 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2351 						      dirty->fifo_reserve_size);
2352 			if (!dirty->cmd)
2353 				return -ENOMEM;
2354 
2355 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2356 		}
2357 		dirty->num_hits = 0;
2358 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2359 		       vclips_ptr += increment) {
2360 			s32 clip_left;
2361 			s32 clip_top;
2362 
2363 			/*
2364 			 * Select clip array type. Note that integer type
2365 			 * in @clips is unsigned short, whereas in @vclips
2366 			 * it's 32-bit.
2367 			 */
2368 			if (clips) {
2369 				dirty->fb_x = (s32) clips_ptr->x1;
2370 				dirty->fb_y = (s32) clips_ptr->y1;
2371 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2372 					crtc_x;
2373 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2374 					crtc_y;
2375 			} else {
2376 				dirty->fb_x = vclips_ptr->x;
2377 				dirty->fb_y = vclips_ptr->y;
2378 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2379 					dest_x - crtc_x;
2380 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2381 					dest_y - crtc_y;
2382 			}
2383 
2384 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2385 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2386 
2387 			/* Skip this clip if it's outside the crtc region */
2388 			if (dirty->unit_x1 >= crtc_width ||
2389 			    dirty->unit_y1 >= crtc_height ||
2390 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2391 				continue;
2392 
2393 			/* Clip right and bottom to crtc limits */
2394 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2395 					       crtc_width);
2396 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2397 					       crtc_height);
2398 
2399 			/* Clip left and top to crtc limits */
2400 			clip_left = min_t(s32, dirty->unit_x1, 0);
2401 			clip_top = min_t(s32, dirty->unit_y1, 0);
2402 			dirty->unit_x1 -= clip_left;
2403 			dirty->unit_y1 -= clip_top;
2404 			dirty->fb_x -= clip_left;
2405 			dirty->fb_y -= clip_top;
2406 
2407 			dirty->clip(dirty);
2408 		}
2409 
2410 		dirty->fifo_commit(dirty);
2411 	}
2412 
2413 	return 0;
2414 }
2415 
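/*
 * Illustrative sketch, not actual driver code: a caller typically embeds
 * struct vmw_kms_dirty in a larger closure, fills in the per-clip and
 * commit callbacks plus the FIFO reservation size, and then lets
 * vmw_kms_helper_dirty() do the iteration and clipping. The callback
 * bodies and the zero reservation size below are placeholders.
 */
static void vmw_example_dirty_clip(struct vmw_kms_dirty *dirty)
{
	/*
	 * Would record one clip at dirty->unit_x1/y1..x2/y2, e.g. into
	 * dirty->cmd when a FIFO reservation was made.
	 */
	dirty->num_hits++;
}

static void vmw_example_dirty_commit(struct vmw_kms_dirty *dirty)
{
	/* Would submit the dirty->num_hits clips recorded by the clip hook. */
}

static int vmw_example_do_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *vfb,
				const struct drm_clip_rect *clips,
				int num_clips)
{
	struct vmw_kms_dirty dirty = {
		.clip = vmw_example_dirty_clip,
		.fifo_commit = vmw_example_dirty_commit,
		.fifo_reserve_size = 0,
	};

	return vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL, 0, 0,
				    num_clips, 1, &dirty);
}
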
2416 /**
2417  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2418  * cleanup and fencing
2419  * @dev_priv: Pointer to the device-private struct
2420  * @file_priv: Pointer identifying the client when user-space fencing is used
2421  * @ctx: Pointer to the validation context
2422  * @out_fence: If non-NULL, returned refcounted fence-pointer
2423  * @user_fence_rep: If non-NULL, pointer to user-space address area
2424  * in which to copy user-space fence info
2425  */
vmw_kms_helper_validation_finish(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_validation_context * ctx,struct vmw_fence_obj ** out_fence,struct drm_vmw_fence_rep __user * user_fence_rep)2426 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2427 				      struct drm_file *file_priv,
2428 				      struct vmw_validation_context *ctx,
2429 				      struct vmw_fence_obj **out_fence,
2430 				      struct drm_vmw_fence_rep __user *
2431 				      user_fence_rep)
2432 {
2433 	struct vmw_fence_obj *fence = NULL;
2434 	uint32_t handle = 0;
2435 	int ret = 0;
2436 
2437 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2438 	    out_fence)
2439 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2440 						 file_priv ? &handle : NULL);
2441 	vmw_validation_done(ctx, fence);
2442 	if (file_priv)
2443 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2444 					    ret, user_fence_rep, fence,
2445 					    handle, -1);
2446 	if (out_fence)
2447 		*out_fence = fence;
2448 	else
2449 		vmw_fence_obj_unreference(&fence);
2450 }
2451 
2452 /**
2453  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2454  * property.
2455  *
2456  * @dev_priv: Pointer to a device private struct.
2457  *
2458  * Sets up the implicit placement property unless it's already set up.
2459  */
2460 void
vmw_kms_create_implicit_placement_property(struct vmw_private * dev_priv)2461 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2462 {
2463 	if (dev_priv->implicit_placement_property)
2464 		return;
2465 
2466 	dev_priv->implicit_placement_property =
2467 		drm_property_create_range(&dev_priv->drm,
2468 					  DRM_MODE_PROP_IMMUTABLE,
2469 					  "implicit_placement", 0, 1);
2470 }
2471 
2472 /**
2473  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2474  *
2475  * @dev: Pointer to the drm device
2476  * Return: 0 on success. Negative error code on failure.
2477  */
vmw_kms_suspend(struct drm_device * dev)2478 int vmw_kms_suspend(struct drm_device *dev)
2479 {
2480 	struct vmw_private *dev_priv = vmw_priv(dev);
2481 
2482 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2483 	if (IS_ERR(dev_priv->suspend_state)) {
2484 		int ret = PTR_ERR(dev_priv->suspend_state);
2485 
2486 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2487 		dev_priv->suspend_state = NULL;
2488 
2489 		return ret;
2490 	}
2491 
2492 	return 0;
2493 }
2494 
2495 
2496 /**
2497  * vmw_kms_resume - Re-enable modesetting and restore state
2498  *
2499  * @dev: Pointer to the drm device
2500  * Return: 0 on success. Negative error code on failure.
2501  *
2502  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2503  * to call this function without a previous vmw_kms_suspend().
2504  */
vmw_kms_resume(struct drm_device * dev)2505 int vmw_kms_resume(struct drm_device *dev)
2506 {
2507 	struct vmw_private *dev_priv = vmw_priv(dev);
2508 	int ret;
2509 
2510 	if (WARN_ON(!dev_priv->suspend_state))
2511 		return 0;
2512 
2513 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2514 	dev_priv->suspend_state = NULL;
2515 
2516 	return ret;
2517 }
2518 
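/*
 * Illustrative sketch, not actual driver code: the two helpers above are
 * meant to be used as a pair, e.g. from the driver's PM paths, and
 * vmw_kms_resume() must only run after a successful vmw_kms_suspend().
 * The function names are made up for this example.
 */
static int vmw_example_pm_freeze(struct drm_device *dev)
{
	return vmw_kms_suspend(dev);	/* saves the atomic state */
}

static int vmw_example_pm_restore(struct drm_device *dev)
{
	return vmw_kms_resume(dev);	/* restores the saved state */
}
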
2519 /**
2520  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2521  *
2522  * @dev: Pointer to the drm device
2523  */
vmw_kms_lost_device(struct drm_device * dev)2524 void vmw_kms_lost_device(struct drm_device *dev)
2525 {
2526 	drm_atomic_helper_shutdown(dev);
2527 }
2528 
2529 /**
2530  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2531  * @update: The closure structure.
2532  *
2533  * Call this helper after setting callbacks in &vmw_du_update_plane to do a
2534  * plane update on a display unit.
2535  *
2536  * Return: 0 on success or a negative error code on failure.
2537  */
vmw_du_helper_plane_update(struct vmw_du_update_plane * update)2538 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2539 {
2540 	struct drm_plane_state *state = update->plane->state;
2541 	struct drm_plane_state *old_state = update->old_state;
2542 	struct drm_atomic_helper_damage_iter iter;
2543 	struct drm_rect clip;
2544 	struct drm_rect bb;
2545 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2546 	uint32_t reserved_size = 0;
2547 	uint32_t submit_size = 0;
2548 	uint32_t curr_size = 0;
2549 	uint32_t num_hits = 0;
2550 	void *cmd_start;
2551 	char *cmd_next;
2552 	int ret;
2553 
2554 	/*
2555 	 * Iterate in advance to check whether a plane update is really needed
2556 	 * and to find the number of clips that fall within the plane src, for
2557 	 * fifo allocation.
2557 	 */
2558 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2559 	drm_atomic_for_each_plane_damage(&iter, &clip)
2560 		num_hits++;
2561 
2562 	if (num_hits == 0)
2563 		return 0;
2564 
2565 	if (update->vfb->bo) {
2566 		struct vmw_framebuffer_bo *vfbbo =
2567 			container_of(update->vfb, typeof(*vfbbo), base);
2568 
2569 		/*
2570 		 * For screen targets we want a mappable bo, for everything else we
2571 		 * want an accelerated, i.e. host-backed (vram or gmr), bo. If the
2572 		 * display unit is not a screen target then mobs shouldn't be available.
2573 		 */
2574 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2575 			vmw_bo_placement_set(vfbbo->buffer,
2576 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2577 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2578 		} else {
2579 			WARN_ON(update->dev_priv->has_mob);
2580 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2581 		}
2582 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2583 	} else {
2584 		struct vmw_framebuffer_surface *vfbs =
2585 			container_of(update->vfb, typeof(*vfbs), base);
2586 		struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
2587 
2588 		ret = vmw_validation_add_resource(&val_ctx, &surf->res,
2589 						  0, VMW_RES_DIRTY_NONE, NULL,
2590 						  NULL);
2591 	}
2592 
2593 	if (ret)
2594 		return ret;
2595 
2596 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2597 	if (ret)
2598 		goto out_unref;
2599 
2600 	reserved_size = update->calc_fifo_size(update, num_hits);
2601 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2602 	if (!cmd_start) {
2603 		ret = -ENOMEM;
2604 		goto out_revert;
2605 	}
2606 
2607 	cmd_next = cmd_start;
2608 
2609 	if (update->post_prepare) {
2610 		curr_size = update->post_prepare(update, cmd_next);
2611 		cmd_next += curr_size;
2612 		submit_size += curr_size;
2613 	}
2614 
2615 	if (update->pre_clip) {
2616 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2617 		cmd_next += curr_size;
2618 		submit_size += curr_size;
2619 	}
2620 
2621 	bb.x1 = INT_MAX;
2622 	bb.y1 = INT_MAX;
2623 	bb.x2 = INT_MIN;
2624 	bb.y2 = INT_MIN;
2625 
2626 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2627 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2628 		uint32_t fb_x = clip.x1;
2629 		uint32_t fb_y = clip.y1;
2630 
2631 		vmw_du_translate_to_crtc(state, &clip);
2632 		if (update->clip) {
2633 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2634 						 fb_y);
2635 			cmd_next += curr_size;
2636 			submit_size += curr_size;
2637 		}
2638 		bb.x1 = min_t(int, bb.x1, clip.x1);
2639 		bb.y1 = min_t(int, bb.y1, clip.y1);
2640 		bb.x2 = max_t(int, bb.x2, clip.x2);
2641 		bb.y2 = max_t(int, bb.y2, clip.y2);
2642 	}
2643 
2644 	curr_size = update->post_clip(update, cmd_next, &bb);
2645 	submit_size += curr_size;
2646 
2647 	if (reserved_size < submit_size)
2648 		submit_size = 0;
2649 
2650 	vmw_cmd_commit(update->dev_priv, submit_size);
2651 
2652 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2653 					 update->out_fence, NULL);
2654 	return ret;
2655 
2656 out_revert:
2657 	vmw_validation_revert(&val_ctx);
2658 
2659 out_unref:
2660 	vmw_validation_unref_lists(&val_ctx);
2661 	return ret;
2662 }
2663 
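/*
 * Illustrative sketch, not actual driver code: the closure callbacks return
 * the number of command bytes they emitted so that
 * vmw_du_helper_plane_update() can advance its command pointer and track the
 * submit size. The prototypes are inferred from the call sites above and the
 * 32-byte per-clip command size is a placeholder.
 */
static uint32_t vmw_example_calc_fifo_size(struct vmw_du_update_plane *update,
					   uint32_t num_hits)
{
	return num_hits * 32;	/* one fixed-size command per damage clip */
}

static uint32_t vmw_example_clip_cb(struct vmw_du_update_plane *update,
				    void *cmd, struct drm_rect *clip,
				    uint32_t fb_x, uint32_t fb_y)
{
	/* Would encode one blit from (fb_x, fb_y) to *clip into cmd. */
	return 32;
}
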
2664 /**
2665  * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2666  *
2667  * @connector: the drm connector, part of a DU container
2668  * @mode: drm mode to check
2669  *
2670  * Returns MODE_OK on success, or a drm_mode_status error code.
2671  */
vmw_connector_mode_valid(struct drm_connector * connector,struct drm_display_mode * mode)2672 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2673 					      struct drm_display_mode *mode)
2674 {
2675 	enum drm_mode_status ret;
2676 	struct drm_device *dev = connector->dev;
2677 	struct vmw_private *dev_priv = vmw_priv(dev);
2678 	u32 assumed_cpp = 4;
2679 
2680 	if (dev_priv->assume_16bpp)
2681 		assumed_cpp = 2;
2682 
2683 	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
2684 				     dev_priv->texture_max_height);
2685 	if (ret != MODE_OK)
2686 		return ret;
2687 
2688 	if (!vmw_kms_validate_mode_vram(dev_priv,
2689 					mode->hdisplay * assumed_cpp,
2690 					mode->vdisplay))
2691 		return MODE_MEM;
2692 
2693 	return MODE_OK;
2694 }
2695 
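/*
 * Worked example for the VRAM check above: with assumed_cpp = 4, a
 * 2560x1440 mode needs a pitch of 2560 * 4 = 10240 bytes and therefore
 * 10240 * 1440 = 14745600 bytes (~14 MiB) of VRAM, which must be smaller
 * than dev_priv->vram_size for the mode to be accepted.
 */
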
2696 /**
2697  * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2698  *
2699  * @connector: the drm connector, part of a DU container
2700  *
2701  * Returns the number of added modes.
2702  */
vmw_connector_get_modes(struct drm_connector * connector)2703 int vmw_connector_get_modes(struct drm_connector *connector)
2704 {
2705 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2706 	struct drm_device *dev = connector->dev;
2707 	struct vmw_private *dev_priv = vmw_priv(dev);
2708 	struct drm_display_mode *mode = NULL;
2709 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2710 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2711 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2712 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2713 	};
2714 	u32 max_width;
2715 	u32 max_height;
2716 	u32 num_modes;
2717 
2718 	/* Add preferred mode */
2719 	mode = drm_mode_duplicate(dev, &prefmode);
2720 	if (!mode)
2721 		return 0;
2722 
2723 	mode->hdisplay = du->pref_width;
2724 	mode->vdisplay = du->pref_height;
2725 	vmw_guess_mode_timing(mode);
2726 	drm_mode_set_name(mode);
2727 
2728 	drm_mode_probed_add(connector, mode);
2729 	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2730 
2731 	/* Probe connector for all modes not exceeding our geom limits */
2732 	max_width  = dev_priv->texture_max_width;
2733 	max_height = dev_priv->texture_max_height;
2734 
2735 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2736 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2737 		max_height = min(dev_priv->stdu_max_height, max_height);
2738 	}
2739 
2740 	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2741 
2742 	return num_modes;
2743 }
2744 
vmw_user_object_ref(struct vmw_user_object * uo)2745 struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
2746 {
2747 	if (uo->buffer)
2748 		vmw_user_bo_ref(uo->buffer);
2749 	else if (uo->surface)
2750 		vmw_surface_reference(uo->surface);
2751 	return uo;
2752 }
2753 
vmw_user_object_unref(struct vmw_user_object * uo)2754 void vmw_user_object_unref(struct vmw_user_object *uo)
2755 {
2756 	if (uo->buffer)
2757 		vmw_user_bo_unref(&uo->buffer);
2758 	else if (uo->surface)
2759 		vmw_surface_unreference(&uo->surface);
2760 }
2761 
2762 struct vmw_bo *
vmw_user_object_buffer(struct vmw_user_object * uo)2763 vmw_user_object_buffer(struct vmw_user_object *uo)
2764 {
2765 	if (uo->buffer)
2766 		return uo->buffer;
2767 	else if (uo->surface)
2768 		return uo->surface->res.guest_memory_bo;
2769 	return NULL;
2770 }
2771 
2772 struct vmw_surface *
vmw_user_object_surface(struct vmw_user_object * uo)2773 vmw_user_object_surface(struct vmw_user_object *uo)
2774 {
2775 	if (uo->buffer)
2776 		return uo->buffer->dumb_surface;
2777 	return uo->surface;
2778 }
2779 
vmw_user_object_map(struct vmw_user_object * uo)2780 void *vmw_user_object_map(struct vmw_user_object *uo)
2781 {
2782 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2783 
2784 	WARN_ON(!bo);
2785 	return vmw_bo_map_and_cache(bo);
2786 }
2787 
vmw_user_object_map_size(struct vmw_user_object * uo,size_t size)2788 void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
2789 {
2790 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2791 
2792 	WARN_ON(!bo);
2793 	return vmw_bo_map_and_cache_size(bo, size);
2794 }
2795 
vmw_user_object_unmap(struct vmw_user_object * uo)2796 void vmw_user_object_unmap(struct vmw_user_object *uo)
2797 {
2798 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2799 	int ret;
2800 
2801 	WARN_ON(!bo);
2802 
2803 	/* Fence the mob creation so we are guaranteed to have the mob */
2804 	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
2805 	if (ret != 0)
2806 		return;
2807 
2808 	vmw_bo_unmap(bo);
2809 	vmw_bo_pin_reserved(bo, false);
2810 
2811 	ttm_bo_unreserve(&bo->tbo);
2812 }
2813 
vmw_user_object_is_mapped(struct vmw_user_object * uo)2814 bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
2815 {
2816 	struct vmw_bo *bo;
2817 
2818 	if (!uo || vmw_user_object_is_null(uo))
2819 		return false;
2820 
2821 	bo = vmw_user_object_buffer(uo);
2822 
2823 	if (WARN_ON(!bo))
2824 		return false;
2825 
2826 	WARN_ON(bo->map.bo && !bo->map.virtual);
2827 	return bo->map.virtual;
2828 }
2829 
vmw_user_object_is_null(struct vmw_user_object * uo)2830 bool vmw_user_object_is_null(struct vmw_user_object *uo)
2831 {
2832 	return !uo->buffer && !uo->surface;
2833 }
2834
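
/*
 * Illustrative sketch, not actual driver code: typical pairing of the
 * vmw_user_object helpers above, taking a reference, mapping the backing
 * buffer and releasing both again. Error handling is reduced to the bare
 * minimum for this example.
 */
static void vmw_example_touch_uo(struct vmw_user_object *uo)
{
	void *vaddr;

	if (!uo || vmw_user_object_is_null(uo))
		return;

	vmw_user_object_ref(uo);
	vaddr = vmw_user_object_map(uo);
	if (vaddr) {
		/* ... access the mapped contents here ... */
		vmw_user_object_unmap(uo);
	}
	vmw_user_object_unref(uo);
}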