xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c (revision 29b4a6996c244f0d360537d6a4a0996468372c17)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 #include "vmwgfx_kms.h"
29 
30 #include "vmwgfx_bo.h"
31 #include "vmwgfx_vkms.h"
32 #include "vmw_surface_cache.h"
33 
34 #include <drm/drm_atomic.h>
35 #include <drm/drm_atomic_helper.h>
36 #include <drm/drm_damage_helper.h>
37 #include <drm/drm_fourcc.h>
38 #include <drm/drm_rect.h>
39 #include <drm/drm_sysfs.h>
40 #include <drm/drm_edid.h>
41 
vmw_du_init(struct vmw_display_unit * du)42 void vmw_du_init(struct vmw_display_unit *du)
43 {
44 	vmw_vkms_crtc_init(&du->crtc);
45 }
46 
vmw_du_cleanup(struct vmw_display_unit * du)47 void vmw_du_cleanup(struct vmw_display_unit *du)
48 {
49 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
50 
51 	vmw_vkms_crtc_cleanup(&du->crtc);
52 	drm_plane_cleanup(&du->primary);
53 	if (vmw_cmd_supported(dev_priv))
54 		drm_plane_cleanup(&du->cursor.base);
55 
56 	drm_connector_unregister(&du->connector);
57 	drm_crtc_cleanup(&du->crtc);
58 	drm_encoder_cleanup(&du->encoder);
59 	drm_connector_cleanup(&du->connector);
60 }
61 
62 /*
63  * Display Unit Cursor functions
64  */
65 
66 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
67 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
68 				  struct vmw_plane_state *vps,
69 				  u32 *image, u32 width, u32 height,
70 				  u32 hotspotX, u32 hotspotY);
71 
72 struct vmw_svga_fifo_cmd_define_cursor {
73 	u32 cmd;
74 	SVGAFifoCmdDefineAlphaCursor cursor;
75 };
76 
77 /**
78  * vmw_send_define_cursor_cmd - queue a define cursor command
79  * @dev_priv: the private driver struct
80  * @image: buffer which holds the cursor image
81  * @width: width of the mouse cursor image
82  * @height: height of the mouse cursor image
83  * @hotspotX: the horizontal position of mouse hotspot
84  * @hotspotY: the vertical position of mouse hotspot
85  */
vmw_send_define_cursor_cmd(struct vmw_private * dev_priv,u32 * image,u32 width,u32 height,u32 hotspotX,u32 hotspotY)86 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
87 				       u32 *image, u32 width, u32 height,
88 				       u32 hotspotX, u32 hotspotY)
89 {
90 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
91 	const u32 image_size = width * height * sizeof(*image);
92 	const u32 cmd_size = sizeof(*cmd) + image_size;
93 
94 	/* Try to reserve fifocmd space and swallow any failures;
95 	   such reservations cannot be left unconsumed for long
96 	   under the risk of clogging other fifocmd users, so
97 	   we treat reservations separtely from the way we treat
98 	   other fallible KMS-atomic resources at prepare_fb */
99 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
100 
101 	if (unlikely(!cmd))
102 		return;
103 
104 	memset(cmd, 0, sizeof(*cmd));
105 
106 	memcpy(&cmd[1], image, image_size);
107 
108 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
109 	cmd->cursor.id = 0;
110 	cmd->cursor.width = width;
111 	cmd->cursor.height = height;
112 	cmd->cursor.hotspotX = hotspotX;
113 	cmd->cursor.hotspotY = hotspotY;
114 
115 	vmw_cmd_commit_flush(dev_priv, cmd_size);
116 }
117 
118 /**
119  * vmw_cursor_update_image - update the cursor image on the provided plane
120  * @dev_priv: the private driver struct
121  * @vps: the plane state of the cursor plane
122  * @image: buffer which holds the cursor image
123  * @width: width of the mouse cursor image
124  * @height: height of the mouse cursor image
125  * @hotspotX: the horizontal position of mouse hotspot
126  * @hotspotY: the vertical position of mouse hotspot
127  */
vmw_cursor_update_image(struct vmw_private * dev_priv,struct vmw_plane_state * vps,u32 * image,u32 width,u32 height,u32 hotspotX,u32 hotspotY)128 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
129 				    struct vmw_plane_state *vps,
130 				    u32 *image, u32 width, u32 height,
131 				    u32 hotspotX, u32 hotspotY)
132 {
133 	if (vps->cursor.bo)
134 		vmw_cursor_update_mob(dev_priv, vps, image,
135 				      vps->base.crtc_w, vps->base.crtc_h,
136 				      hotspotX, hotspotY);
137 
138 	else
139 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
140 					   hotspotX, hotspotY);
141 }
142 
143 
144 /**
145  * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
146  *
147  * Called from inside vmw_du_cursor_plane_atomic_update to actually
148  * make the cursor-image live.
149  *
150  * @dev_priv: device to work with
151  * @vps: the plane state of the cursor plane
152  * @image: cursor source data to fill the MOB with
153  * @width: source data width
154  * @height: source data height
155  * @hotspotX: cursor hotspot x
156  * @hotspotY: cursor hotspot Y
157  */
vmw_cursor_update_mob(struct vmw_private * dev_priv,struct vmw_plane_state * vps,u32 * image,u32 width,u32 height,u32 hotspotX,u32 hotspotY)158 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
159 				  struct vmw_plane_state *vps,
160 				  u32 *image, u32 width, u32 height,
161 				  u32 hotspotX, u32 hotspotY)
162 {
163 	SVGAGBCursorHeader *header;
164 	SVGAGBAlphaCursorHeader *alpha_header;
165 	const u32 image_size = width * height * sizeof(*image);
166 
167 	header = vmw_bo_map_and_cache(vps->cursor.bo);
168 	alpha_header = &header->header.alphaHeader;
169 
170 	memset(header, 0, sizeof(*header));
171 
172 	header->type = SVGA_ALPHA_CURSOR;
173 	header->sizeInBytes = image_size;
174 
175 	alpha_header->hotspotX = hotspotX;
176 	alpha_header->hotspotY = hotspotY;
177 	alpha_header->width = width;
178 	alpha_header->height = height;
179 
180 	memcpy(header + 1, image, image_size);
181 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
182 		  vps->cursor.bo->tbo.resource->start);
183 }
184 
185 
vmw_du_cursor_mob_size(u32 w,u32 h)186 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
187 {
188 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
189 }
190 
191 /**
192  * vmw_du_cursor_plane_acquire_image -- Acquire the image data
193  * @vps: cursor plane state
194  */
vmw_du_cursor_plane_acquire_image(struct vmw_plane_state * vps)195 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
196 {
197 	struct vmw_surface *surf;
198 
199 	if (vmw_user_object_is_null(&vps->uo))
200 		return NULL;
201 
202 	surf = vmw_user_object_surface(&vps->uo);
203 	if (surf && !vmw_user_object_is_mapped(&vps->uo))
204 		return surf->snooper.image;
205 
206 	return vmw_user_object_map(&vps->uo);
207 }
208 
vmw_du_cursor_plane_has_changed(struct vmw_plane_state * old_vps,struct vmw_plane_state * new_vps)209 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
210 					    struct vmw_plane_state *new_vps)
211 {
212 	void *old_image;
213 	void *new_image;
214 	u32 size;
215 	bool changed;
216 
217 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
218 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
219 	    return true;
220 
221 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
222 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
223 	    return true;
224 
225 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
226 
227 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
228 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
229 
230 	changed = false;
231 	if (old_image && new_image && old_image != new_image)
232 		changed = memcmp(old_image, new_image, size) != 0;
233 
234 	return changed;
235 }
236 
vmw_du_destroy_cursor_mob(struct vmw_bo ** vbo)237 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
238 {
239 	if (!(*vbo))
240 		return;
241 
242 	ttm_bo_unpin(&(*vbo)->tbo);
243 	vmw_bo_unreference(vbo);
244 }
245 
vmw_du_put_cursor_mob(struct vmw_cursor_plane * vcp,struct vmw_plane_state * vps)246 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
247 				  struct vmw_plane_state *vps)
248 {
249 	u32 i;
250 
251 	if (!vps->cursor.bo)
252 		return;
253 
254 	vmw_du_cursor_plane_unmap_cm(vps);
255 
256 	/* Look for a free slot to return this mob to the cache. */
257 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
258 		if (!vcp->cursor_mobs[i]) {
259 			vcp->cursor_mobs[i] = vps->cursor.bo;
260 			vps->cursor.bo = NULL;
261 			return;
262 		}
263 	}
264 
265 	/* Cache is full: See if this mob is bigger than an existing mob. */
266 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
267 		if (vcp->cursor_mobs[i]->tbo.base.size <
268 		    vps->cursor.bo->tbo.base.size) {
269 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
270 			vcp->cursor_mobs[i] = vps->cursor.bo;
271 			vps->cursor.bo = NULL;
272 			return;
273 		}
274 	}
275 
276 	/* Destroy it if it's not worth caching. */
277 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
278 }
279 
vmw_du_get_cursor_mob(struct vmw_cursor_plane * vcp,struct vmw_plane_state * vps)280 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
281 				 struct vmw_plane_state *vps)
282 {
283 	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
284 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
285 	u32 i;
286 	u32 cursor_max_dim, mob_max_size;
287 	struct vmw_fence_obj *fence = NULL;
288 	int ret;
289 
290 	if (!dev_priv->has_mob ||
291 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
292 		return -EINVAL;
293 
294 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
295 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
296 
297 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
298 	    vps->base.crtc_h > cursor_max_dim)
299 		return -EINVAL;
300 
301 	if (vps->cursor.bo) {
302 		if (vps->cursor.bo->tbo.base.size >= size)
303 			return 0;
304 		vmw_du_put_cursor_mob(vcp, vps);
305 	}
306 
307 	/* Look for an unused mob in the cache. */
308 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
309 		if (vcp->cursor_mobs[i] &&
310 		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
311 			vps->cursor.bo = vcp->cursor_mobs[i];
312 			vcp->cursor_mobs[i] = NULL;
313 			return 0;
314 		}
315 	}
316 	/* Create a new mob if we can't find an existing one. */
317 	ret = vmw_bo_create_and_populate(dev_priv, size,
318 					 VMW_BO_DOMAIN_MOB,
319 					 &vps->cursor.bo);
320 
321 	if (ret != 0)
322 		return ret;
323 
324 	/* Fence the mob creation so we are guarateed to have the mob */
325 	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
326 	if (ret != 0)
327 		goto teardown;
328 
329 	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
330 	if (ret != 0) {
331 		ttm_bo_unreserve(&vps->cursor.bo->tbo);
332 		goto teardown;
333 	}
334 
335 	dma_fence_wait(&fence->base, false);
336 	dma_fence_put(&fence->base);
337 
338 	ttm_bo_unreserve(&vps->cursor.bo->tbo);
339 	return 0;
340 
341 teardown:
342 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
343 	return ret;
344 }
345 
346 
vmw_cursor_update_position(struct vmw_private * dev_priv,bool show,int x,int y)347 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
348 				       bool show, int x, int y)
349 {
350 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
351 					     : SVGA_CURSOR_ON_HIDE;
352 	uint32_t count;
353 
354 	spin_lock(&dev_priv->cursor_lock);
355 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
356 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
357 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
358 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
359 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
360 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
361 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
362 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
363 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
364 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
365 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
366 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
367 	} else {
368 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
369 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
370 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
371 	}
372 	spin_unlock(&dev_priv->cursor_lock);
373 }
374 
vmw_kms_cursor_snoop(struct vmw_surface * srf,struct ttm_object_file * tfile,struct ttm_buffer_object * bo,SVGA3dCmdHeader * header)375 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
376 			  struct ttm_object_file *tfile,
377 			  struct ttm_buffer_object *bo,
378 			  SVGA3dCmdHeader *header)
379 {
380 	struct ttm_bo_kmap_obj map;
381 	unsigned long kmap_offset;
382 	unsigned long kmap_num;
383 	SVGA3dCopyBox *box;
384 	unsigned box_count;
385 	void *virtual;
386 	bool is_iomem;
387 	struct vmw_dma_cmd {
388 		SVGA3dCmdHeader header;
389 		SVGA3dCmdSurfaceDMA dma;
390 	} *cmd;
391 	int i, ret;
392 	const struct SVGA3dSurfaceDesc *desc =
393 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
394 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
395 
396 	cmd = container_of(header, struct vmw_dma_cmd, header);
397 
398 	/* No snooper installed, nothing to copy */
399 	if (!srf->snooper.image)
400 		return;
401 
402 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
403 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
404 		return;
405 	}
406 
407 	if (cmd->header.size < 64) {
408 		DRM_ERROR("at least one full copy box must be given\n");
409 		return;
410 	}
411 
412 	box = (SVGA3dCopyBox *)&cmd[1];
413 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
414 			sizeof(SVGA3dCopyBox);
415 
416 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
417 	    box->x != 0    || box->y != 0    || box->z != 0    ||
418 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
419 	    box->d != 1    || box_count != 1 ||
420 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
421 		/* TODO handle none page aligned offsets */
422 		/* TODO handle more dst & src != 0 */
423 		/* TODO handle more then one copy */
424 		DRM_ERROR("Can't snoop dma request for cursor!\n");
425 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
426 			  box->srcx, box->srcy, box->srcz,
427 			  box->x, box->y, box->z,
428 			  box->w, box->h, box->d, box_count,
429 			  cmd->dma.guest.ptr.offset);
430 		return;
431 	}
432 
433 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
434 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
435 
436 	ret = ttm_bo_reserve(bo, true, false, NULL);
437 	if (unlikely(ret != 0)) {
438 		DRM_ERROR("reserve failed\n");
439 		return;
440 	}
441 
442 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
443 	if (unlikely(ret != 0))
444 		goto err_unreserve;
445 
446 	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
447 
448 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
449 		memcpy(srf->snooper.image, virtual,
450 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
451 	} else {
452 		/* Image is unsigned pointer. */
453 		for (i = 0; i < box->h; i++)
454 			memcpy(srf->snooper.image + i * image_pitch,
455 			       virtual + i * cmd->dma.guest.pitch,
456 			       box->w * desc->pitchBytesPerBlock);
457 	}
458 
459 	srf->snooper.age++;
460 
461 	ttm_bo_kunmap(&map);
462 err_unreserve:
463 	ttm_bo_unreserve(bo);
464 }
465 
466 /**
467  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
468  *
469  * @dev_priv: Pointer to the device private struct.
470  *
471  * Clears all legacy hotspots.
472  */
vmw_kms_legacy_hotspot_clear(struct vmw_private * dev_priv)473 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
474 {
475 	struct drm_device *dev = &dev_priv->drm;
476 	struct vmw_display_unit *du;
477 	struct drm_crtc *crtc;
478 
479 	drm_modeset_lock_all(dev);
480 	drm_for_each_crtc(crtc, dev) {
481 		du = vmw_crtc_to_du(crtc);
482 
483 		du->hotspot_x = 0;
484 		du->hotspot_y = 0;
485 	}
486 	drm_modeset_unlock_all(dev);
487 }
488 
vmw_kms_cursor_post_execbuf(struct vmw_private * dev_priv)489 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
490 {
491 	struct drm_device *dev = &dev_priv->drm;
492 	struct vmw_display_unit *du;
493 	struct drm_crtc *crtc;
494 
495 	mutex_lock(&dev->mode_config.mutex);
496 
497 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
498 		du = vmw_crtc_to_du(crtc);
499 		if (!du->cursor_surface ||
500 		    du->cursor_age == du->cursor_surface->snooper.age ||
501 		    !du->cursor_surface->snooper.image)
502 			continue;
503 
504 		du->cursor_age = du->cursor_surface->snooper.age;
505 		vmw_send_define_cursor_cmd(dev_priv,
506 					   du->cursor_surface->snooper.image,
507 					   VMW_CURSOR_SNOOP_WIDTH,
508 					   VMW_CURSOR_SNOOP_HEIGHT,
509 					   du->hotspot_x + du->core_hotspot_x,
510 					   du->hotspot_y + du->core_hotspot_y);
511 	}
512 
513 	mutex_unlock(&dev->mode_config.mutex);
514 }
515 
516 
vmw_du_cursor_plane_destroy(struct drm_plane * plane)517 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
518 {
519 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
520 	u32 i;
521 
522 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
523 
524 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
525 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
526 
527 	drm_plane_cleanup(plane);
528 }
529 
530 
vmw_du_primary_plane_destroy(struct drm_plane * plane)531 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
532 {
533 	drm_plane_cleanup(plane);
534 
535 	/* Planes are static in our case so we don't free it */
536 }
537 
538 
539 /**
540  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
541  *
542  * @vps: plane state associated with the display surface
543  */
vmw_du_plane_unpin_surf(struct vmw_plane_state * vps)544 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
545 {
546 	struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
547 
548 	if (surf) {
549 		if (vps->pinned) {
550 			vmw_resource_unpin(&surf->res);
551 			vps->pinned--;
552 		}
553 	}
554 }
555 
556 
557 /**
558  * vmw_du_plane_cleanup_fb - Unpins the plane surface
559  *
560  * @plane:  display plane
561  * @old_state: Contains the FB to clean up
562  *
563  * Unpins the framebuffer surface
564  *
565  * Returns 0 on success
566  */
567 void
vmw_du_plane_cleanup_fb(struct drm_plane * plane,struct drm_plane_state * old_state)568 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
569 			struct drm_plane_state *old_state)
570 {
571 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
572 
573 	vmw_du_plane_unpin_surf(vps);
574 }
575 
576 
577 /**
578  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
579  *
580  * @vps: plane_state
581  *
582  * Returns 0 on success
583  */
584 
585 static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state * vps)586 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
587 {
588 	int ret;
589 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
590 	struct ttm_buffer_object *bo;
591 
592 	if (!vps->cursor.bo)
593 		return -EINVAL;
594 
595 	bo = &vps->cursor.bo->tbo;
596 
597 	if (bo->base.size < size)
598 		return -EINVAL;
599 
600 	if (vps->cursor.bo->map.virtual)
601 		return 0;
602 
603 	ret = ttm_bo_reserve(bo, false, false, NULL);
604 	if (unlikely(ret != 0))
605 		return -ENOMEM;
606 
607 	vmw_bo_map_and_cache(vps->cursor.bo);
608 
609 	ttm_bo_unreserve(bo);
610 
611 	if (unlikely(ret != 0))
612 		return -ENOMEM;
613 
614 	return 0;
615 }
616 
617 
618 /**
619  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
620  *
621  * @vps: state of the cursor plane
622  *
623  * Returns 0 on success
624  */
625 
626 static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state * vps)627 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
628 {
629 	int ret = 0;
630 	struct vmw_bo *vbo = vps->cursor.bo;
631 
632 	if (!vbo || !vbo->map.virtual)
633 		return 0;
634 
635 	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
636 	if (likely(ret == 0)) {
637 		vmw_bo_unmap(vbo);
638 		ttm_bo_unreserve(&vbo->tbo);
639 	}
640 
641 	return ret;
642 }
643 
644 
645 /**
646  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
647  *
648  * @plane: cursor plane
649  * @old_state: contains the state to clean up
650  *
651  * Unmaps all cursor bo mappings and unpins the cursor surface
652  *
653  * Returns 0 on success
654  */
655 void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane * plane,struct drm_plane_state * old_state)656 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
657 			       struct drm_plane_state *old_state)
658 {
659 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
660 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
661 
662 	if (!vmw_user_object_is_null(&vps->uo))
663 		vmw_user_object_unmap(&vps->uo);
664 
665 	vmw_du_cursor_plane_unmap_cm(vps);
666 	vmw_du_put_cursor_mob(vcp, vps);
667 
668 	vmw_du_plane_unpin_surf(vps);
669 	vmw_user_object_unref(&vps->uo);
670 }
671 
672 
673 /**
674  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
675  *
676  * @plane:  display plane
677  * @new_state: info on the new plane state, including the FB
678  *
679  * Returns 0 on success
680  */
681 int
vmw_du_cursor_plane_prepare_fb(struct drm_plane * plane,struct drm_plane_state * new_state)682 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
683 			       struct drm_plane_state *new_state)
684 {
685 	struct drm_framebuffer *fb = new_state->fb;
686 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
687 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
688 	struct vmw_bo *bo = NULL;
689 	int ret = 0;
690 
691 	if (!vmw_user_object_is_null(&vps->uo)) {
692 		vmw_user_object_unmap(&vps->uo);
693 		vmw_user_object_unref(&vps->uo);
694 	}
695 
696 	if (fb) {
697 		if (vmw_framebuffer_to_vfb(fb)->bo) {
698 			vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
699 			vps->uo.surface = NULL;
700 		} else {
701 			memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
702 		}
703 		vmw_user_object_ref(&vps->uo);
704 	}
705 
706 	bo = vmw_user_object_buffer(&vps->uo);
707 	if (bo) {
708 		struct ttm_operation_ctx ctx = {false, false};
709 
710 		ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
711 		if (ret != 0)
712 			return -ENOMEM;
713 
714 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
715 		if (ret != 0)
716 			return -ENOMEM;
717 
718 		vmw_bo_pin_reserved(bo, true);
719 		if (vmw_framebuffer_to_vfb(fb)->bo) {
720 			const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
721 
722 			(void)vmw_bo_map_and_cache_size(bo, size);
723 		} else {
724 			vmw_bo_map_and_cache(bo);
725 		}
726 		ttm_bo_unreserve(&bo->tbo);
727 	}
728 
729 	if (!vmw_user_object_is_null(&vps->uo)) {
730 		vmw_du_get_cursor_mob(vcp, vps);
731 		vmw_du_cursor_plane_map_cm(vps);
732 	}
733 
734 	return 0;
735 }
736 
737 
738 void
vmw_du_cursor_plane_atomic_update(struct drm_plane * plane,struct drm_atomic_state * state)739 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
740 				  struct drm_atomic_state *state)
741 {
742 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
743 									   plane);
744 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
745 									   plane);
746 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
747 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
748 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
749 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
750 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
751 	struct vmw_bo *old_bo = NULL;
752 	struct vmw_bo *new_bo = NULL;
753 	s32 hotspot_x, hotspot_y;
754 	int ret;
755 
756 	hotspot_x = du->hotspot_x + new_state->hotspot_x;
757 	hotspot_y = du->hotspot_y + new_state->hotspot_y;
758 
759 	du->cursor_surface = vmw_user_object_surface(&vps->uo);
760 
761 	if (vmw_user_object_is_null(&vps->uo)) {
762 		vmw_cursor_update_position(dev_priv, false, 0, 0);
763 		return;
764 	}
765 
766 	vps->cursor.hotspot_x = hotspot_x;
767 	vps->cursor.hotspot_y = hotspot_y;
768 
769 	if (du->cursor_surface)
770 		du->cursor_age = du->cursor_surface->snooper.age;
771 
772 	if (!vmw_user_object_is_null(&old_vps->uo)) {
773 		old_bo = vmw_user_object_buffer(&old_vps->uo);
774 		ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
775 		if (ret != 0)
776 			return;
777 	}
778 
779 	if (!vmw_user_object_is_null(&vps->uo)) {
780 		new_bo = vmw_user_object_buffer(&vps->uo);
781 		if (old_bo != new_bo) {
782 			ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
783 			if (ret != 0)
784 				return;
785 		} else {
786 			new_bo = NULL;
787 		}
788 	}
789 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
790 		/*
791 		 * If it hasn't changed, avoid making the device do extra
792 		 * work by keeping the old cursor active.
793 		 */
794 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
795 		old_vps->cursor = vps->cursor;
796 		vps->cursor = tmp;
797 	} else {
798 		void *image = vmw_du_cursor_plane_acquire_image(vps);
799 		if (image)
800 			vmw_cursor_update_image(dev_priv, vps, image,
801 						new_state->crtc_w,
802 						new_state->crtc_h,
803 						hotspot_x, hotspot_y);
804 	}
805 
806 	if (old_bo)
807 		ttm_bo_unreserve(&old_bo->tbo);
808 	if (new_bo)
809 		ttm_bo_unreserve(&new_bo->tbo);
810 
811 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
812 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
813 
814 	vmw_cursor_update_position(dev_priv, true,
815 				   du->cursor_x + hotspot_x,
816 				   du->cursor_y + hotspot_y);
817 
818 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
819 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
820 }
821 
822 
823 /**
824  * vmw_du_primary_plane_atomic_check - check if the new state is okay
825  *
826  * @plane: display plane
827  * @state: info on the new plane state, including the FB
828  *
829  * Check if the new state is settable given the current state.  Other
830  * than what the atomic helper checks, we care about crtc fitting
831  * the FB and maintaining one active framebuffer.
832  *
833  * Returns 0 on success
834  */
vmw_du_primary_plane_atomic_check(struct drm_plane * plane,struct drm_atomic_state * state)835 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
836 				      struct drm_atomic_state *state)
837 {
838 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
839 									   plane);
840 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
841 									   plane);
842 	struct drm_crtc_state *crtc_state = NULL;
843 	struct drm_framebuffer *new_fb = new_state->fb;
844 	struct drm_framebuffer *old_fb = old_state->fb;
845 	int ret;
846 
847 	/*
848 	 * Ignore damage clips if the framebuffer attached to the plane's state
849 	 * has changed since the last plane update (page-flip). In this case, a
850 	 * full plane update should happen because uploads are done per-buffer.
851 	 */
852 	if (old_fb != new_fb)
853 		new_state->ignore_damage_clips = true;
854 
855 	if (new_state->crtc)
856 		crtc_state = drm_atomic_get_new_crtc_state(state,
857 							   new_state->crtc);
858 
859 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
860 						  DRM_PLANE_NO_SCALING,
861 						  DRM_PLANE_NO_SCALING,
862 						  false, true);
863 	return ret;
864 }
865 
866 
867 /**
868  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
869  *
870  * @plane: cursor plane
871  * @state: info on the new plane state
872  *
873  * This is a chance to fail if the new cursor state does not fit
874  * our requirements.
875  *
876  * Returns 0 on success
877  */
vmw_du_cursor_plane_atomic_check(struct drm_plane * plane,struct drm_atomic_state * state)878 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
879 				     struct drm_atomic_state *state)
880 {
881 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
882 									   plane);
883 	int ret = 0;
884 	struct drm_crtc_state *crtc_state = NULL;
885 	struct vmw_surface *surface = NULL;
886 	struct drm_framebuffer *fb = new_state->fb;
887 
888 	if (new_state->crtc)
889 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
890 							   new_state->crtc);
891 
892 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
893 						  DRM_PLANE_NO_SCALING,
894 						  DRM_PLANE_NO_SCALING,
895 						  true, true);
896 	if (ret)
897 		return ret;
898 
899 	/* Turning off */
900 	if (!fb)
901 		return 0;
902 
903 	/* A lot of the code assumes this */
904 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
905 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
906 			  new_state->crtc_w, new_state->crtc_h);
907 		return -EINVAL;
908 	}
909 
910 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
911 		surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
912 
913 		WARN_ON(!surface);
914 
915 		if (!surface ||
916 		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
917 			DRM_ERROR("surface not suitable for cursor\n");
918 			return -EINVAL;
919 		}
920 	}
921 
922 	return 0;
923 }
924 
925 
vmw_du_crtc_atomic_check(struct drm_crtc * crtc,struct drm_atomic_state * state)926 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
927 			     struct drm_atomic_state *state)
928 {
929 	struct vmw_private *vmw = vmw_priv(crtc->dev);
930 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
931 									 crtc);
932 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
933 	int connector_mask = drm_connector_mask(&du->connector);
934 	bool has_primary = new_state->plane_mask &
935 			   drm_plane_mask(crtc->primary);
936 
937 	/*
938 	 * This is fine in general, but broken userspace might expect
939 	 * some actual rendering so give a clue as why it's blank.
940 	 */
941 	if (new_state->enable && !has_primary)
942 		drm_dbg_driver(&vmw->drm,
943 			       "CRTC without a primary plane will be blank.\n");
944 
945 
946 	if (new_state->connector_mask != connector_mask &&
947 	    new_state->connector_mask != 0) {
948 		DRM_ERROR("Invalid connectors configuration\n");
949 		return -EINVAL;
950 	}
951 
952 	/*
953 	 * Our virtual device does not have a dot clock, so use the logical
954 	 * clock value as the dot clock.
955 	 */
956 	if (new_state->mode.crtc_clock == 0)
957 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
958 
959 	return 0;
960 }
961 
962 
vmw_du_crtc_atomic_begin(struct drm_crtc * crtc,struct drm_atomic_state * state)963 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
964 			      struct drm_atomic_state *state)
965 {
966 	vmw_vkms_crtc_atomic_begin(crtc, state);
967 }
968 
969 /**
970  * vmw_du_crtc_duplicate_state - duplicate crtc state
971  * @crtc: DRM crtc
972  *
973  * Allocates and returns a copy of the crtc state (both common and
974  * vmw-specific) for the specified crtc.
975  *
976  * Returns: The newly allocated crtc state, or NULL on failure.
977  */
978 struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc * crtc)979 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
980 {
981 	struct drm_crtc_state *state;
982 	struct vmw_crtc_state *vcs;
983 
984 	if (WARN_ON(!crtc->state))
985 		return NULL;
986 
987 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
988 
989 	if (!vcs)
990 		return NULL;
991 
992 	state = &vcs->base;
993 
994 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
995 
996 	return state;
997 }
998 
999 
1000 /**
1001  * vmw_du_crtc_reset - creates a blank vmw crtc state
1002  * @crtc: DRM crtc
1003  *
1004  * Resets the atomic state for @crtc by freeing the state pointer (which
1005  * might be NULL, e.g. at driver load time) and allocating a new empty state
1006  * object.
1007  */
vmw_du_crtc_reset(struct drm_crtc * crtc)1008 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1009 {
1010 	struct vmw_crtc_state *vcs;
1011 
1012 
1013 	if (crtc->state) {
1014 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1015 
1016 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1017 	}
1018 
1019 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1020 
1021 	if (!vcs) {
1022 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1023 		return;
1024 	}
1025 
1026 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1027 }
1028 
1029 
1030 /**
1031  * vmw_du_crtc_destroy_state - destroy crtc state
1032  * @crtc: DRM crtc
1033  * @state: state object to destroy
1034  *
1035  * Destroys the crtc state (both common and vmw-specific) for the
1036  * specified plane.
1037  */
1038 void
vmw_du_crtc_destroy_state(struct drm_crtc * crtc,struct drm_crtc_state * state)1039 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1040 			  struct drm_crtc_state *state)
1041 {
1042 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1043 }
1044 
1045 
1046 /**
1047  * vmw_du_plane_duplicate_state - duplicate plane state
1048  * @plane: drm plane
1049  *
1050  * Allocates and returns a copy of the plane state (both common and
1051  * vmw-specific) for the specified plane.
1052  *
1053  * Returns: The newly allocated plane state, or NULL on failure.
1054  */
1055 struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane * plane)1056 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1057 {
1058 	struct drm_plane_state *state;
1059 	struct vmw_plane_state *vps;
1060 
1061 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1062 
1063 	if (!vps)
1064 		return NULL;
1065 
1066 	vps->pinned = 0;
1067 	vps->cpp = 0;
1068 
1069 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1070 
1071 	/* Each ref counted resource needs to be acquired again */
1072 	vmw_user_object_ref(&vps->uo);
1073 	state = &vps->base;
1074 
1075 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1076 
1077 	return state;
1078 }
1079 
1080 
1081 /**
1082  * vmw_du_plane_reset - creates a blank vmw plane state
1083  * @plane: drm plane
1084  *
1085  * Resets the atomic state for @plane by freeing the state pointer (which might
1086  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1087  */
vmw_du_plane_reset(struct drm_plane * plane)1088 void vmw_du_plane_reset(struct drm_plane *plane)
1089 {
1090 	struct vmw_plane_state *vps;
1091 
1092 	if (plane->state)
1093 		vmw_du_plane_destroy_state(plane, plane->state);
1094 
1095 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1096 
1097 	if (!vps) {
1098 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1099 		return;
1100 	}
1101 
1102 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1103 }
1104 
1105 
1106 /**
1107  * vmw_du_plane_destroy_state - destroy plane state
1108  * @plane: DRM plane
1109  * @state: state object to destroy
1110  *
1111  * Destroys the plane state (both common and vmw-specific) for the
1112  * specified plane.
1113  */
1114 void
vmw_du_plane_destroy_state(struct drm_plane * plane,struct drm_plane_state * state)1115 vmw_du_plane_destroy_state(struct drm_plane *plane,
1116 			   struct drm_plane_state *state)
1117 {
1118 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1119 
1120 	/* Should have been freed by cleanup_fb */
1121 	vmw_user_object_unref(&vps->uo);
1122 
1123 	drm_atomic_helper_plane_destroy_state(plane, state);
1124 }
1125 
1126 
1127 /**
1128  * vmw_du_connector_duplicate_state - duplicate connector state
1129  * @connector: DRM connector
1130  *
1131  * Allocates and returns a copy of the connector state (both common and
1132  * vmw-specific) for the specified connector.
1133  *
1134  * Returns: The newly allocated connector state, or NULL on failure.
1135  */
1136 struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector * connector)1137 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1138 {
1139 	struct drm_connector_state *state;
1140 	struct vmw_connector_state *vcs;
1141 
1142 	if (WARN_ON(!connector->state))
1143 		return NULL;
1144 
1145 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1146 
1147 	if (!vcs)
1148 		return NULL;
1149 
1150 	state = &vcs->base;
1151 
1152 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1153 
1154 	return state;
1155 }
1156 
1157 
1158 /**
1159  * vmw_du_connector_reset - creates a blank vmw connector state
1160  * @connector: DRM connector
1161  *
1162  * Resets the atomic state for @connector by freeing the state pointer (which
1163  * might be NULL, e.g. at driver load time) and allocating a new empty state
1164  * object.
1165  */
vmw_du_connector_reset(struct drm_connector * connector)1166 void vmw_du_connector_reset(struct drm_connector *connector)
1167 {
1168 	struct vmw_connector_state *vcs;
1169 
1170 
1171 	if (connector->state) {
1172 		__drm_atomic_helper_connector_destroy_state(connector->state);
1173 
1174 		kfree(vmw_connector_state_to_vcs(connector->state));
1175 	}
1176 
1177 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1178 
1179 	if (!vcs) {
1180 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1181 		return;
1182 	}
1183 
1184 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1185 }
1186 
1187 
1188 /**
1189  * vmw_du_connector_destroy_state - destroy connector state
1190  * @connector: DRM connector
1191  * @state: state object to destroy
1192  *
1193  * Destroys the connector state (both common and vmw-specific) for the
1194  * specified plane.
1195  */
1196 void
vmw_du_connector_destroy_state(struct drm_connector * connector,struct drm_connector_state * state)1197 vmw_du_connector_destroy_state(struct drm_connector *connector,
1198 			  struct drm_connector_state *state)
1199 {
1200 	drm_atomic_helper_connector_destroy_state(connector, state);
1201 }
1202 /*
1203  * Generic framebuffer code
1204  */
1205 
1206 /*
1207  * Surface framebuffer code
1208  */
1209 
vmw_framebuffer_surface_destroy(struct drm_framebuffer * framebuffer)1210 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1211 {
1212 	struct vmw_framebuffer_surface *vfbs =
1213 		vmw_framebuffer_to_vfbs(framebuffer);
1214 
1215 	drm_framebuffer_cleanup(framebuffer);
1216 	vmw_user_object_unref(&vfbs->uo);
1217 
1218 	kfree(vfbs);
1219 }
1220 
1221 /**
1222  * vmw_kms_readback - Perform a readback from the screen system to
1223  * a buffer-object backed framebuffer.
1224  *
1225  * @dev_priv: Pointer to the device private structure.
1226  * @file_priv: Pointer to a struct drm_file identifying the caller.
1227  * Must be set to NULL if @user_fence_rep is NULL.
1228  * @vfb: Pointer to the buffer-object backed framebuffer.
1229  * @user_fence_rep: User-space provided structure for fence information.
1230  * Must be set to non-NULL if @file_priv is non-NULL.
1231  * @vclips: Array of clip rects.
1232  * @num_clips: Number of clip rects in @vclips.
1233  *
1234  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1235  * interrupted.
1236  */
vmw_kms_readback(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_framebuffer * vfb,struct drm_vmw_fence_rep __user * user_fence_rep,struct drm_vmw_rect * vclips,uint32_t num_clips)1237 int vmw_kms_readback(struct vmw_private *dev_priv,
1238 		     struct drm_file *file_priv,
1239 		     struct vmw_framebuffer *vfb,
1240 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1241 		     struct drm_vmw_rect *vclips,
1242 		     uint32_t num_clips)
1243 {
1244 	switch (dev_priv->active_display_unit) {
1245 	case vmw_du_screen_object:
1246 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1247 					    user_fence_rep, vclips, num_clips,
1248 					    NULL);
1249 	case vmw_du_screen_target:
1250 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1251 					     user_fence_rep, NULL, vclips, num_clips,
1252 					     1, NULL);
1253 	default:
1254 		WARN_ONCE(true,
1255 			  "Readback called with invalid display system.\n");
1256 }
1257 
1258 	return -ENOSYS;
1259 }
1260 
vmw_framebuffer_surface_create_handle(struct drm_framebuffer * fb,struct drm_file * file_priv,unsigned int * handle)1261 static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
1262 						 struct drm_file *file_priv,
1263 						 unsigned int *handle)
1264 {
1265 	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
1266 	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
1267 
1268 	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
1269 }
1270 
1271 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1272 	.create_handle = vmw_framebuffer_surface_create_handle,
1273 	.destroy = vmw_framebuffer_surface_destroy,
1274 	.dirty = drm_atomic_helper_dirtyfb,
1275 };
1276 
vmw_kms_new_framebuffer_surface(struct vmw_private * dev_priv,struct vmw_user_object * uo,struct vmw_framebuffer ** out,const struct drm_mode_fb_cmd2 * mode_cmd)1277 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1278 					   struct vmw_user_object *uo,
1279 					   struct vmw_framebuffer **out,
1280 					   const struct drm_mode_fb_cmd2
1281 					   *mode_cmd)
1282 
1283 {
1284 	struct drm_device *dev = &dev_priv->drm;
1285 	struct vmw_framebuffer_surface *vfbs;
1286 	enum SVGA3dSurfaceFormat format;
1287 	struct vmw_surface *surface;
1288 	int ret;
1289 
1290 	/* 3D is only supported on HWv8 and newer hosts */
1291 	if (dev_priv->active_display_unit == vmw_du_legacy)
1292 		return -ENOSYS;
1293 
1294 	surface = vmw_user_object_surface(uo);
1295 
1296 	/*
1297 	 * Sanity checks.
1298 	 */
1299 
1300 	if (!drm_any_plane_has_format(&dev_priv->drm,
1301 				      mode_cmd->pixel_format,
1302 				      mode_cmd->modifier[0])) {
1303 		drm_dbg(&dev_priv->drm,
1304 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1305 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1306 		return -EINVAL;
1307 	}
1308 
1309 	/* Surface must be marked as a scanout. */
1310 	if (unlikely(!surface->metadata.scanout))
1311 		return -EINVAL;
1312 
1313 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1314 		     surface->metadata.num_sizes != 1 ||
1315 		     surface->metadata.base_size.width < mode_cmd->width ||
1316 		     surface->metadata.base_size.height < mode_cmd->height ||
1317 		     surface->metadata.base_size.depth != 1)) {
1318 		DRM_ERROR("Incompatible surface dimensions "
1319 			  "for requested mode.\n");
1320 		return -EINVAL;
1321 	}
1322 
1323 	switch (mode_cmd->pixel_format) {
1324 	case DRM_FORMAT_ARGB8888:
1325 		format = SVGA3D_A8R8G8B8;
1326 		break;
1327 	case DRM_FORMAT_XRGB8888:
1328 		format = SVGA3D_X8R8G8B8;
1329 		break;
1330 	case DRM_FORMAT_RGB565:
1331 		format = SVGA3D_R5G6B5;
1332 		break;
1333 	case DRM_FORMAT_XRGB1555:
1334 		format = SVGA3D_A1R5G5B5;
1335 		break;
1336 	default:
1337 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1338 			  &mode_cmd->pixel_format);
1339 		return -EINVAL;
1340 	}
1341 
1342 	/*
1343 	 * For DX, surface format validation is done when surface->scanout
1344 	 * is set.
1345 	 */
1346 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1347 		DRM_ERROR("Invalid surface format for requested mode.\n");
1348 		return -EINVAL;
1349 	}
1350 
1351 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1352 	if (!vfbs) {
1353 		ret = -ENOMEM;
1354 		goto out_err1;
1355 	}
1356 
1357 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1358 	memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
1359 	vmw_user_object_ref(&vfbs->uo);
1360 
1361 	*out = &vfbs->base;
1362 
1363 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1364 				   &vmw_framebuffer_surface_funcs);
1365 	if (ret)
1366 		goto out_err2;
1367 
1368 	return 0;
1369 
1370 out_err2:
1371 	vmw_user_object_unref(&vfbs->uo);
1372 	kfree(vfbs);
1373 out_err1:
1374 	return ret;
1375 }
1376 
1377 /*
1378  * Buffer-object framebuffer code
1379  */
1380 
vmw_framebuffer_bo_create_handle(struct drm_framebuffer * fb,struct drm_file * file_priv,unsigned int * handle)1381 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1382 					    struct drm_file *file_priv,
1383 					    unsigned int *handle)
1384 {
1385 	struct vmw_framebuffer_bo *vfbd =
1386 			vmw_framebuffer_to_vfbd(fb);
1387 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1388 }
1389 
vmw_framebuffer_bo_destroy(struct drm_framebuffer * framebuffer)1390 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1391 {
1392 	struct vmw_framebuffer_bo *vfbd =
1393 		vmw_framebuffer_to_vfbd(framebuffer);
1394 
1395 	drm_framebuffer_cleanup(framebuffer);
1396 	vmw_bo_unreference(&vfbd->buffer);
1397 
1398 	kfree(vfbd);
1399 }
1400 
1401 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1402 	.create_handle = vmw_framebuffer_bo_create_handle,
1403 	.destroy = vmw_framebuffer_bo_destroy,
1404 	.dirty = drm_atomic_helper_dirtyfb,
1405 };
1406 
vmw_kms_new_framebuffer_bo(struct vmw_private * dev_priv,struct vmw_bo * bo,struct vmw_framebuffer ** out,const struct drm_mode_fb_cmd2 * mode_cmd)1407 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1408 				      struct vmw_bo *bo,
1409 				      struct vmw_framebuffer **out,
1410 				      const struct drm_mode_fb_cmd2
1411 				      *mode_cmd)
1412 
1413 {
1414 	struct drm_device *dev = &dev_priv->drm;
1415 	struct vmw_framebuffer_bo *vfbd;
1416 	unsigned int requested_size;
1417 	int ret;
1418 
1419 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1420 	if (unlikely(requested_size > bo->tbo.base.size)) {
1421 		DRM_ERROR("Screen buffer object size is too small "
1422 			  "for requested mode.\n");
1423 		return -EINVAL;
1424 	}
1425 
1426 	if (!drm_any_plane_has_format(&dev_priv->drm,
1427 				      mode_cmd->pixel_format,
1428 				      mode_cmd->modifier[0])) {
1429 		drm_dbg(&dev_priv->drm,
1430 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1431 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1432 		return -EINVAL;
1433 	}
1434 
1435 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1436 	if (!vfbd) {
1437 		ret = -ENOMEM;
1438 		goto out_err1;
1439 	}
1440 
1441 	vfbd->base.base.obj[0] = &bo->tbo.base;
1442 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1443 	vfbd->base.bo = true;
1444 	vfbd->buffer = vmw_bo_reference(bo);
1445 	*out = &vfbd->base;
1446 
1447 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1448 				   &vmw_framebuffer_bo_funcs);
1449 	if (ret)
1450 		goto out_err2;
1451 
1452 	return 0;
1453 
1454 out_err2:
1455 	vmw_bo_unreference(&bo);
1456 	kfree(vfbd);
1457 out_err1:
1458 	return ret;
1459 }
1460 
1461 
1462 /**
1463  * vmw_kms_srf_ok - check if a surface can be created
1464  *
1465  * @dev_priv: Pointer to device private struct.
1466  * @width: requested width
1467  * @height: requested height
1468  *
1469  * Surfaces need to be less than texture size
1470  */
1471 static bool
vmw_kms_srf_ok(struct vmw_private * dev_priv,uint32_t width,uint32_t height)1472 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1473 {
1474 	if (width  > dev_priv->texture_max_width ||
1475 	    height > dev_priv->texture_max_height)
1476 		return false;
1477 
1478 	return true;
1479 }
1480 
1481 /**
1482  * vmw_kms_new_framebuffer - Create a new framebuffer.
1483  *
1484  * @dev_priv: Pointer to device private struct.
1485  * @uo: Pointer to user object to wrap the kms framebuffer around.
1486  * Either the buffer or surface inside the user object must be NULL.
1487  * @mode_cmd: Frame-buffer metadata.
1488  */
1489 struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private * dev_priv,struct vmw_user_object * uo,const struct drm_mode_fb_cmd2 * mode_cmd)1490 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1491 			struct vmw_user_object *uo,
1492 			const struct drm_mode_fb_cmd2 *mode_cmd)
1493 {
1494 	struct vmw_framebuffer *vfb = NULL;
1495 	int ret;
1496 
1497 	/* Create the new framebuffer depending one what we have */
1498 	if (vmw_user_object_surface(uo)) {
1499 		ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
1500 						      mode_cmd);
1501 	} else if (uo->buffer) {
1502 		ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
1503 						 mode_cmd);
1504 	} else {
1505 		BUG();
1506 	}
1507 
1508 	if (ret)
1509 		return ERR_PTR(ret);
1510 
1511 	return vfb;
1512 }
1513 
1514 /*
1515  * Generic Kernel modesetting functions
1516  */
1517 
vmw_kms_fb_create(struct drm_device * dev,struct drm_file * file_priv,const struct drm_mode_fb_cmd2 * mode_cmd)1518 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1519 						 struct drm_file *file_priv,
1520 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1521 {
1522 	struct vmw_private *dev_priv = vmw_priv(dev);
1523 	struct vmw_framebuffer *vfb = NULL;
1524 	struct vmw_user_object uo = {0};
1525 	int ret;
1526 
1527 	/* returns either a bo or surface */
1528 	ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
1529 				     &uo);
1530 	if (ret) {
1531 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1532 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1533 		goto err_out;
1534 	}
1535 
1536 
1537 	if (vmw_user_object_surface(&uo) &&
1538 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1539 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1540 			dev_priv->texture_max_width,
1541 			dev_priv->texture_max_height);
1542 		goto err_out;
1543 	}
1544 
1545 
1546 	vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
1547 	if (IS_ERR(vfb)) {
1548 		ret = PTR_ERR(vfb);
1549 		goto err_out;
1550 	}
1551 
1552 err_out:
1553 	/* vmw_user_object_lookup takes one ref so does new_fb */
1554 	vmw_user_object_unref(&uo);
1555 
1556 	if (ret) {
1557 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1558 		return ERR_PTR(ret);
1559 	}
1560 
1561 	return &vfb->base;
1562 }
1563 
1564 /**
1565  * vmw_kms_check_display_memory - Validates display memory required for a
1566  * topology
1567  * @dev: DRM device
1568  * @num_rects: number of drm_rect in rects
1569  * @rects: array of drm_rect representing the topology to validate indexed by
1570  * crtc index.
1571  *
1572  * Returns:
1573  * 0 on success otherwise negative error code
1574  */
vmw_kms_check_display_memory(struct drm_device * dev,uint32_t num_rects,struct drm_rect * rects)1575 static int vmw_kms_check_display_memory(struct drm_device *dev,
1576 					uint32_t num_rects,
1577 					struct drm_rect *rects)
1578 {
1579 	struct vmw_private *dev_priv = vmw_priv(dev);
1580 	struct drm_rect bounding_box = {0};
1581 	u64 total_pixels = 0, pixel_mem, bb_mem;
1582 	int i;
1583 
1584 	for (i = 0; i < num_rects; i++) {
1585 		/*
1586 		 * For STDU only individual screen (screen target) is limited by
1587 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1588 		 */
1589 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1590 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1591 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1592 			VMW_DEBUG_KMS("Screen size not supported.\n");
1593 			return -EINVAL;
1594 		}
1595 
1596 		/* Bounding box upper left is at (0,0). */
1597 		if (rects[i].x2 > bounding_box.x2)
1598 			bounding_box.x2 = rects[i].x2;
1599 
1600 		if (rects[i].y2 > bounding_box.y2)
1601 			bounding_box.y2 = rects[i].y2;
1602 
1603 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1604 			(u64) drm_rect_height(&rects[i]);
1605 	}
1606 
1607 	/* Virtual svga device primary limits are always in 32-bpp. */
1608 	pixel_mem = total_pixels * 4;
1609 
1610 	/*
1611 	 * For HV10 and below prim_bb_mem is vram size. When
1612 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
1613 	 * limit on primary bounding box
1614 	 */
1615 	if (pixel_mem > dev_priv->max_primary_mem) {
1616 		VMW_DEBUG_KMS("Combined output size too large.\n");
1617 		return -EINVAL;
1618 	}
1619 
1620 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1621 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1622 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1623 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1624 
1625 		if (bb_mem > dev_priv->max_primary_mem) {
1626 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1627 			return -EINVAL;
1628 		}
1629 	}
1630 
1631 	return 0;
1632 }
1633 
1634 /**
1635  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1636  * crtc mutex
1637  * @state: The atomic state pointer containing the new atomic state
1638  * @crtc: The crtc
1639  *
1640  * This function returns the new crtc state if it's part of the state update.
1641  * Otherwise returns the current crtc state. It also makes sure that the
1642  * crtc mutex is locked.
1643  *
1644  * Returns: A valid crtc state pointer or NULL. It may also return a
1645  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1646  */
1647 static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state * state,struct drm_crtc * crtc)1648 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1649 {
1650 	struct drm_crtc_state *crtc_state;
1651 
1652 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1653 	if (crtc_state) {
1654 		lockdep_assert_held(&crtc->mutex.mutex.base);
1655 	} else {
1656 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1657 
1658 		if (ret != 0 && ret != -EALREADY)
1659 			return ERR_PTR(ret);
1660 
1661 		crtc_state = crtc->state;
1662 	}
1663 
1664 	return crtc_state;
1665 }
1666 
1667 /**
1668  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1669  * from the same fb after the new state is committed.
1670  * @dev: The drm_device.
1671  * @state: The new state to be checked.
1672  *
1673  * Returns:
1674  *   Zero on success,
1675  *   -EINVAL on invalid state,
1676  *   -EDEADLK if modeset locking needs to be rerun.
1677  */
vmw_kms_check_implicit(struct drm_device * dev,struct drm_atomic_state * state)1678 static int vmw_kms_check_implicit(struct drm_device *dev,
1679 				  struct drm_atomic_state *state)
1680 {
1681 	struct drm_framebuffer *implicit_fb = NULL;
1682 	struct drm_crtc *crtc;
1683 	struct drm_crtc_state *crtc_state;
1684 	struct drm_plane_state *plane_state;
1685 
1686 	drm_for_each_crtc(crtc, dev) {
1687 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1688 
1689 		if (!du->is_implicit)
1690 			continue;
1691 
1692 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1693 		if (IS_ERR(crtc_state))
1694 			return PTR_ERR(crtc_state);
1695 
1696 		if (!crtc_state || !crtc_state->enable)
1697 			continue;
1698 
1699 		/*
1700 		 * Can't move primary planes across crtcs, so this is OK.
1701 		 * It also means we don't need to take the plane mutex.
1702 		 */
1703 		plane_state = du->primary.state;
1704 		if (plane_state->crtc != crtc)
1705 			continue;
1706 
1707 		if (!implicit_fb)
1708 			implicit_fb = plane_state->fb;
1709 		else if (implicit_fb != plane_state->fb)
1710 			return -EINVAL;
1711 	}
1712 
1713 	return 0;
1714 }
1715 
1716 /**
1717  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1718  * @dev: DRM device
1719  * @state: the driver state object
1720  *
1721  * Returns:
1722  * 0 on success otherwise negative error code
1723  */
vmw_kms_check_topology(struct drm_device * dev,struct drm_atomic_state * state)1724 static int vmw_kms_check_topology(struct drm_device *dev,
1725 				  struct drm_atomic_state *state)
1726 {
1727 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1728 	struct drm_rect *rects;
1729 	struct drm_crtc *crtc;
1730 	uint32_t i;
1731 	int ret = 0;
1732 
1733 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1734 			GFP_KERNEL);
1735 	if (!rects)
1736 		return -ENOMEM;
1737 
1738 	drm_for_each_crtc(crtc, dev) {
1739 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1740 		struct drm_crtc_state *crtc_state;
1741 
1742 		i = drm_crtc_index(crtc);
1743 
1744 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1745 		if (IS_ERR(crtc_state)) {
1746 			ret = PTR_ERR(crtc_state);
1747 			goto clean;
1748 		}
1749 
1750 		if (!crtc_state)
1751 			continue;
1752 
1753 		if (crtc_state->enable) {
1754 			rects[i].x1 = du->gui_x;
1755 			rects[i].y1 = du->gui_y;
1756 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1757 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1758 		} else {
1759 			rects[i].x1 = 0;
1760 			rects[i].y1 = 0;
1761 			rects[i].x2 = 0;
1762 			rects[i].y2 = 0;
1763 		}
1764 	}
1765 
1766 	/* Determine change to topology due to new atomic state */
1767 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1768 				      new_crtc_state, i) {
1769 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1770 		struct drm_connector *connector;
1771 		struct drm_connector_state *conn_state;
1772 		struct vmw_connector_state *vmw_conn_state;
1773 
1774 		if (!du->pref_active && new_crtc_state->enable) {
1775 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1776 			ret = -EINVAL;
1777 			goto clean;
1778 		}
1779 
1780 		/*
1781 		 * For vmwgfx each crtc has only one connector attached and it
1782 		 * is not changed so don't really need to check the
1783 		 * crtc->connector_mask and iterate over it.
1784 		 */
1785 		connector = &du->connector;
1786 		conn_state = drm_atomic_get_connector_state(state, connector);
1787 		if (IS_ERR(conn_state)) {
1788 			ret = PTR_ERR(conn_state);
1789 			goto clean;
1790 		}
1791 
1792 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1793 		vmw_conn_state->gui_x = du->gui_x;
1794 		vmw_conn_state->gui_y = du->gui_y;
1795 	}
1796 
1797 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1798 					   rects);
1799 
1800 clean:
1801 	kfree(rects);
1802 	return ret;
1803 }
1804 
1805 /**
1806  * vmw_kms_atomic_check_modeset- validate state object for modeset changes
1807  *
1808  * @dev: DRM device
1809  * @state: the driver state object
1810  *
1811  * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1812  * us to assign a value to mode->crtc_clock so that
1813  * drm_calc_timestamping_constants() won't throw an error message
1814  *
1815  * Returns:
1816  * Zero for success or -errno
1817  */
1818 static int
vmw_kms_atomic_check_modeset(struct drm_device * dev,struct drm_atomic_state * state)1819 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1820 			     struct drm_atomic_state *state)
1821 {
1822 	struct drm_crtc *crtc;
1823 	struct drm_crtc_state *crtc_state;
1824 	bool need_modeset = false;
1825 	int i, ret;
1826 
1827 	ret = drm_atomic_helper_check(dev, state);
1828 	if (ret)
1829 		return ret;
1830 
1831 	ret = vmw_kms_check_implicit(dev, state);
1832 	if (ret) {
1833 		VMW_DEBUG_KMS("Invalid implicit state\n");
1834 		return ret;
1835 	}
1836 
1837 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1838 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1839 			need_modeset = true;
1840 	}
1841 
1842 	if (need_modeset)
1843 		return vmw_kms_check_topology(dev, state);
1844 
1845 	return ret;
1846 }
1847 
1848 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1849 	.fb_create = vmw_kms_fb_create,
1850 	.atomic_check = vmw_kms_atomic_check_modeset,
1851 	.atomic_commit = drm_atomic_helper_commit,
1852 };
1853 
vmw_kms_generic_present(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_framebuffer * vfb,struct vmw_surface * surface,uint32_t sid,int32_t destX,int32_t destY,struct drm_vmw_rect * clips,uint32_t num_clips)1854 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1855 				   struct drm_file *file_priv,
1856 				   struct vmw_framebuffer *vfb,
1857 				   struct vmw_surface *surface,
1858 				   uint32_t sid,
1859 				   int32_t destX, int32_t destY,
1860 				   struct drm_vmw_rect *clips,
1861 				   uint32_t num_clips)
1862 {
1863 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1864 					    &surface->res, destX, destY,
1865 					    num_clips, 1, NULL, NULL);
1866 }
1867 
1868 
vmw_kms_present(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_framebuffer * vfb,struct vmw_surface * surface,uint32_t sid,int32_t destX,int32_t destY,struct drm_vmw_rect * clips,uint32_t num_clips)1869 int vmw_kms_present(struct vmw_private *dev_priv,
1870 		    struct drm_file *file_priv,
1871 		    struct vmw_framebuffer *vfb,
1872 		    struct vmw_surface *surface,
1873 		    uint32_t sid,
1874 		    int32_t destX, int32_t destY,
1875 		    struct drm_vmw_rect *clips,
1876 		    uint32_t num_clips)
1877 {
1878 	int ret;
1879 
1880 	switch (dev_priv->active_display_unit) {
1881 	case vmw_du_screen_target:
1882 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1883 						 &surface->res, destX, destY,
1884 						 num_clips, 1, NULL, NULL);
1885 		break;
1886 	case vmw_du_screen_object:
1887 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1888 					      sid, destX, destY, clips,
1889 					      num_clips);
1890 		break;
1891 	default:
1892 		WARN_ONCE(true,
1893 			  "Present called with invalid display system.\n");
1894 		ret = -ENOSYS;
1895 		break;
1896 	}
1897 	if (ret)
1898 		return ret;
1899 
1900 	vmw_cmd_flush(dev_priv, false);
1901 
1902 	return 0;
1903 }
1904 
1905 static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private * dev_priv)1906 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1907 {
1908 	if (dev_priv->hotplug_mode_update_property)
1909 		return;
1910 
1911 	dev_priv->hotplug_mode_update_property =
1912 		drm_property_create_range(&dev_priv->drm,
1913 					  DRM_MODE_PROP_IMMUTABLE,
1914 					  "hotplug_mode_update", 0, 1);
1915 }
1916 
1917 static void
vmw_atomic_commit_tail(struct drm_atomic_state * old_state)1918 vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
1919 {
1920 	struct vmw_private *vmw = vmw_priv(old_state->dev);
1921 	struct drm_crtc *crtc;
1922 	struct drm_crtc_state *old_crtc_state;
1923 	int i;
1924 
1925 	drm_atomic_helper_commit_tail(old_state);
1926 
1927 	if (vmw->vkms_enabled) {
1928 		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1929 			struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1930 			(void)old_crtc_state;
1931 			flush_work(&du->vkms.crc_generator_work);
1932 		}
1933 	}
1934 }
1935 
1936 static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
1937 	.atomic_commit_tail = vmw_atomic_commit_tail,
1938 };
1939 
vmw_kms_init(struct vmw_private * dev_priv)1940 int vmw_kms_init(struct vmw_private *dev_priv)
1941 {
1942 	struct drm_device *dev = &dev_priv->drm;
1943 	int ret;
1944 	static const char *display_unit_names[] = {
1945 		"Invalid",
1946 		"Legacy",
1947 		"Screen Object",
1948 		"Screen Target",
1949 		"Invalid (max)"
1950 	};
1951 
1952 	drm_mode_config_init(dev);
1953 	dev->mode_config.funcs = &vmw_kms_funcs;
1954 	dev->mode_config.min_width = 1;
1955 	dev->mode_config.min_height = 1;
1956 	dev->mode_config.max_width = dev_priv->texture_max_width;
1957 	dev->mode_config.max_height = dev_priv->texture_max_height;
1958 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
1959 	dev->mode_config.helper_private = &vmw_mode_config_helpers;
1960 
1961 	drm_mode_create_suggested_offset_properties(dev);
1962 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1963 
1964 	ret = vmw_kms_stdu_init_display(dev_priv);
1965 	if (ret) {
1966 		ret = vmw_kms_sou_init_display(dev_priv);
1967 		if (ret) /* Fallback */
1968 			ret = vmw_kms_ldu_init_display(dev_priv);
1969 	}
1970 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
1971 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
1972 		 display_unit_names[dev_priv->active_display_unit]);
1973 
1974 	return ret;
1975 }
1976 
vmw_kms_close(struct vmw_private * dev_priv)1977 int vmw_kms_close(struct vmw_private *dev_priv)
1978 {
1979 	int ret = 0;
1980 
1981 	/*
1982 	 * Docs says we should take the lock before calling this function
1983 	 * but since it destroys encoders and our destructor calls
1984 	 * drm_encoder_cleanup which takes the lock we deadlock.
1985 	 */
1986 	drm_mode_config_cleanup(&dev_priv->drm);
1987 	if (dev_priv->active_display_unit == vmw_du_legacy)
1988 		ret = vmw_kms_ldu_close_display(dev_priv);
1989 
1990 	return ret;
1991 }
1992 
vmw_kms_cursor_bypass_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)1993 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1994 				struct drm_file *file_priv)
1995 {
1996 	struct drm_vmw_cursor_bypass_arg *arg = data;
1997 	struct vmw_display_unit *du;
1998 	struct drm_crtc *crtc;
1999 	int ret = 0;
2000 
2001 	mutex_lock(&dev->mode_config.mutex);
2002 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2003 
2004 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2005 			du = vmw_crtc_to_du(crtc);
2006 			du->hotspot_x = arg->xhot;
2007 			du->hotspot_y = arg->yhot;
2008 		}
2009 
2010 		mutex_unlock(&dev->mode_config.mutex);
2011 		return 0;
2012 	}
2013 
2014 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2015 	if (!crtc) {
2016 		ret = -ENOENT;
2017 		goto out;
2018 	}
2019 
2020 	du = vmw_crtc_to_du(crtc);
2021 
2022 	du->hotspot_x = arg->xhot;
2023 	du->hotspot_y = arg->yhot;
2024 
2025 out:
2026 	mutex_unlock(&dev->mode_config.mutex);
2027 
2028 	return ret;
2029 }
2030 
vmw_kms_write_svga(struct vmw_private * vmw_priv,unsigned width,unsigned height,unsigned pitch,unsigned bpp,unsigned depth)2031 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2032 			unsigned width, unsigned height, unsigned pitch,
2033 			unsigned bpp, unsigned depth)
2034 {
2035 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2036 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2037 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2038 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2039 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2040 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2041 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2042 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2043 
2044 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2045 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2046 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2047 		return -EINVAL;
2048 	}
2049 
2050 	return 0;
2051 }
2052 
2053 static
vmw_kms_validate_mode_vram(struct vmw_private * dev_priv,u64 pitch,u64 height)2054 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2055 				u64 pitch,
2056 				u64 height)
2057 {
2058 	return (pitch * height) < (u64)dev_priv->vram_size;
2059 }
2060 
2061 /**
2062  * vmw_du_update_layout - Update the display unit with topology from resolution
2063  * plugin and generate DRM uevent
2064  * @dev_priv: device private
2065  * @num_rects: number of drm_rect in rects
2066  * @rects: toplogy to update
2067  */
vmw_du_update_layout(struct vmw_private * dev_priv,unsigned int num_rects,struct drm_rect * rects)2068 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2069 				unsigned int num_rects, struct drm_rect *rects)
2070 {
2071 	struct drm_device *dev = &dev_priv->drm;
2072 	struct vmw_display_unit *du;
2073 	struct drm_connector *con;
2074 	struct drm_connector_list_iter conn_iter;
2075 	struct drm_modeset_acquire_ctx ctx;
2076 	struct drm_crtc *crtc;
2077 	int ret;
2078 
2079 	/* Currently gui_x/y is protected with the crtc mutex */
2080 	mutex_lock(&dev->mode_config.mutex);
2081 	drm_modeset_acquire_init(&ctx, 0);
2082 retry:
2083 	drm_for_each_crtc(crtc, dev) {
2084 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2085 		if (ret < 0) {
2086 			if (ret == -EDEADLK) {
2087 				drm_modeset_backoff(&ctx);
2088 				goto retry;
2089 		}
2090 			goto out_fini;
2091 		}
2092 	}
2093 
2094 	drm_connector_list_iter_begin(dev, &conn_iter);
2095 	drm_for_each_connector_iter(con, &conn_iter) {
2096 		du = vmw_connector_to_du(con);
2097 		if (num_rects > du->unit) {
2098 			du->pref_width = drm_rect_width(&rects[du->unit]);
2099 			du->pref_height = drm_rect_height(&rects[du->unit]);
2100 			du->pref_active = true;
2101 			du->gui_x = rects[du->unit].x1;
2102 			du->gui_y = rects[du->unit].y1;
2103 		} else {
2104 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2105 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2106 			du->pref_active = false;
2107 			du->gui_x = 0;
2108 			du->gui_y = 0;
2109 		}
2110 	}
2111 	drm_connector_list_iter_end(&conn_iter);
2112 
2113 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2114 		du = vmw_connector_to_du(con);
2115 		if (num_rects > du->unit) {
2116 			drm_object_property_set_value
2117 			  (&con->base, dev->mode_config.suggested_x_property,
2118 			   du->gui_x);
2119 			drm_object_property_set_value
2120 			  (&con->base, dev->mode_config.suggested_y_property,
2121 			   du->gui_y);
2122 		} else {
2123 			drm_object_property_set_value
2124 			  (&con->base, dev->mode_config.suggested_x_property,
2125 			   0);
2126 			drm_object_property_set_value
2127 			  (&con->base, dev->mode_config.suggested_y_property,
2128 			   0);
2129 		}
2130 		con->status = vmw_du_connector_detect(con, true);
2131 	}
2132 out_fini:
2133 	drm_modeset_drop_locks(&ctx);
2134 	drm_modeset_acquire_fini(&ctx);
2135 	mutex_unlock(&dev->mode_config.mutex);
2136 
2137 	drm_sysfs_hotplug_event(dev);
2138 
2139 	return 0;
2140 }
2141 
vmw_du_crtc_gamma_set(struct drm_crtc * crtc,u16 * r,u16 * g,u16 * b,uint32_t size,struct drm_modeset_acquire_ctx * ctx)2142 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2143 			  u16 *r, u16 *g, u16 *b,
2144 			  uint32_t size,
2145 			  struct drm_modeset_acquire_ctx *ctx)
2146 {
2147 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2148 	int i;
2149 
2150 	for (i = 0; i < size; i++) {
2151 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2152 			  r[i], g[i], b[i]);
2153 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2154 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2155 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2156 	}
2157 
2158 	return 0;
2159 }
2160 
vmw_du_connector_dpms(struct drm_connector * connector,int mode)2161 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2162 {
2163 	return 0;
2164 }
2165 
2166 enum drm_connector_status
vmw_du_connector_detect(struct drm_connector * connector,bool force)2167 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2168 {
2169 	uint32_t num_displays;
2170 	struct drm_device *dev = connector->dev;
2171 	struct vmw_private *dev_priv = vmw_priv(dev);
2172 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2173 
2174 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2175 
2176 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2177 		 du->pref_active) ?
2178 		connector_status_connected : connector_status_disconnected);
2179 }
2180 
2181 /**
2182  * vmw_guess_mode_timing - Provide fake timings for a
2183  * 60Hz vrefresh mode.
2184  *
2185  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2186  * members filled in.
2187  */
vmw_guess_mode_timing(struct drm_display_mode * mode)2188 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2189 {
2190 	mode->hsync_start = mode->hdisplay + 50;
2191 	mode->hsync_end = mode->hsync_start + 50;
2192 	mode->htotal = mode->hsync_end + 50;
2193 
2194 	mode->vsync_start = mode->vdisplay + 50;
2195 	mode->vsync_end = mode->vsync_start + 50;
2196 	mode->vtotal = mode->vsync_end + 50;
2197 
2198 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2199 }
2200 
2201 
2202 /**
2203  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2204  * @dev: drm device for the ioctl
2205  * @data: data pointer for the ioctl
2206  * @file_priv: drm file for the ioctl call
2207  *
2208  * Update preferred topology of display unit as per ioctl request. The topology
2209  * is expressed as array of drm_vmw_rect.
2210  * e.g.
2211  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2212  *
2213  * NOTE:
2214  * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Beside
2215  * device limit on topology, x + w and y + h (lower right) cannot be greater
2216  * than INT_MAX. So topology beyond these limits will return with error.
2217  *
2218  * Returns:
2219  * Zero on success, negative errno on failure.
2220  */
vmw_kms_update_layout_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)2221 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2222 				struct drm_file *file_priv)
2223 {
2224 	struct vmw_private *dev_priv = vmw_priv(dev);
2225 	struct drm_mode_config *mode_config = &dev->mode_config;
2226 	struct drm_vmw_update_layout_arg *arg =
2227 		(struct drm_vmw_update_layout_arg *)data;
2228 	void __user *user_rects;
2229 	struct drm_vmw_rect *rects;
2230 	struct drm_rect *drm_rects;
2231 	unsigned rects_size;
2232 	int ret, i;
2233 
2234 	if (!arg->num_outputs) {
2235 		struct drm_rect def_rect = {0, 0,
2236 					    VMWGFX_MIN_INITIAL_WIDTH,
2237 					    VMWGFX_MIN_INITIAL_HEIGHT};
2238 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2239 		return 0;
2240 	}
2241 
2242 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2243 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2244 			GFP_KERNEL);
2245 	if (unlikely(!rects))
2246 		return -ENOMEM;
2247 
2248 	user_rects = (void __user *)(unsigned long)arg->rects;
2249 	ret = copy_from_user(rects, user_rects, rects_size);
2250 	if (unlikely(ret != 0)) {
2251 		DRM_ERROR("Failed to get rects.\n");
2252 		ret = -EFAULT;
2253 		goto out_free;
2254 	}
2255 
2256 	drm_rects = (struct drm_rect *)rects;
2257 
2258 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2259 	for (i = 0; i < arg->num_outputs; i++) {
2260 		struct drm_vmw_rect curr_rect;
2261 
2262 		/* Verify user-space for overflow as kernel use drm_rect */
2263 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2264 		    (rects[i].y + rects[i].h > INT_MAX)) {
2265 			ret = -ERANGE;
2266 			goto out_free;
2267 		}
2268 
2269 		curr_rect = rects[i];
2270 		drm_rects[i].x1 = curr_rect.x;
2271 		drm_rects[i].y1 = curr_rect.y;
2272 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2273 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2274 
2275 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2276 			      drm_rects[i].x1, drm_rects[i].y1,
2277 			      drm_rects[i].x2, drm_rects[i].y2);
2278 
2279 		/*
2280 		 * Currently this check is limiting the topology within
2281 		 * mode_config->max (which actually is max texture size
2282 		 * supported by virtual device). This limit is here to address
2283 		 * window managers that create a big framebuffer for whole
2284 		 * topology.
2285 		 */
2286 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2287 		    drm_rects[i].x2 > mode_config->max_width ||
2288 		    drm_rects[i].y2 > mode_config->max_height) {
2289 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2290 				      drm_rects[i].x1, drm_rects[i].y1,
2291 				      drm_rects[i].x2, drm_rects[i].y2);
2292 			ret = -EINVAL;
2293 			goto out_free;
2294 		}
2295 	}
2296 
2297 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2298 
2299 	if (ret == 0)
2300 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2301 
2302 out_free:
2303 	kfree(rects);
2304 	return ret;
2305 }
2306 
2307 /**
2308  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2309  * on a set of cliprects and a set of display units.
2310  *
2311  * @dev_priv: Pointer to a device private structure.
2312  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2313  * @clips: A set of struct drm_clip_rect. Either this os @vclips must be NULL.
2314  * Cliprects are given in framebuffer coordinates.
2315  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2316  * be NULL. Cliprects are given in source coordinates.
2317  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2318  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2319  * @num_clips: Number of cliprects in the @clips or @vclips array.
2320  * @increment: Integer with which to increment the clip counter when looping.
2321  * Used to skip a predetermined number of clip rects.
2322  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2323  */
vmw_kms_helper_dirty(struct vmw_private * dev_priv,struct vmw_framebuffer * framebuffer,const struct drm_clip_rect * clips,const struct drm_vmw_rect * vclips,s32 dest_x,s32 dest_y,int num_clips,int increment,struct vmw_kms_dirty * dirty)2324 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2325 			 struct vmw_framebuffer *framebuffer,
2326 			 const struct drm_clip_rect *clips,
2327 			 const struct drm_vmw_rect *vclips,
2328 			 s32 dest_x, s32 dest_y,
2329 			 int num_clips,
2330 			 int increment,
2331 			 struct vmw_kms_dirty *dirty)
2332 {
2333 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2334 	struct drm_crtc *crtc;
2335 	u32 num_units = 0;
2336 	u32 i, k;
2337 
2338 	dirty->dev_priv = dev_priv;
2339 
2340 	/* If crtc is passed, no need to iterate over other display units */
2341 	if (dirty->crtc) {
2342 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2343 	} else {
2344 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2345 				    head) {
2346 			struct drm_plane *plane = crtc->primary;
2347 
2348 			if (plane->state->fb == &framebuffer->base)
2349 				units[num_units++] = vmw_crtc_to_du(crtc);
2350 		}
2351 	}
2352 
2353 	for (k = 0; k < num_units; k++) {
2354 		struct vmw_display_unit *unit = units[k];
2355 		s32 crtc_x = unit->crtc.x;
2356 		s32 crtc_y = unit->crtc.y;
2357 		s32 crtc_width = unit->crtc.mode.hdisplay;
2358 		s32 crtc_height = unit->crtc.mode.vdisplay;
2359 		const struct drm_clip_rect *clips_ptr = clips;
2360 		const struct drm_vmw_rect *vclips_ptr = vclips;
2361 
2362 		dirty->unit = unit;
2363 		if (dirty->fifo_reserve_size > 0) {
2364 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2365 						      dirty->fifo_reserve_size);
2366 			if (!dirty->cmd)
2367 				return -ENOMEM;
2368 
2369 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2370 		}
2371 		dirty->num_hits = 0;
2372 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2373 		       vclips_ptr += increment) {
2374 			s32 clip_left;
2375 			s32 clip_top;
2376 
2377 			/*
2378 			 * Select clip array type. Note that integer type
2379 			 * in @clips is unsigned short, whereas in @vclips
2380 			 * it's 32-bit.
2381 			 */
2382 			if (clips) {
2383 				dirty->fb_x = (s32) clips_ptr->x1;
2384 				dirty->fb_y = (s32) clips_ptr->y1;
2385 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2386 					crtc_x;
2387 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2388 					crtc_y;
2389 			} else {
2390 				dirty->fb_x = vclips_ptr->x;
2391 				dirty->fb_y = vclips_ptr->y;
2392 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2393 					dest_x - crtc_x;
2394 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2395 					dest_y - crtc_y;
2396 			}
2397 
2398 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2399 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2400 
2401 			/* Skip this clip if it's outside the crtc region */
2402 			if (dirty->unit_x1 >= crtc_width ||
2403 			    dirty->unit_y1 >= crtc_height ||
2404 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2405 				continue;
2406 
2407 			/* Clip right and bottom to crtc limits */
2408 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2409 					       crtc_width);
2410 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2411 					       crtc_height);
2412 
2413 			/* Clip left and top to crtc limits */
2414 			clip_left = min_t(s32, dirty->unit_x1, 0);
2415 			clip_top = min_t(s32, dirty->unit_y1, 0);
2416 			dirty->unit_x1 -= clip_left;
2417 			dirty->unit_y1 -= clip_top;
2418 			dirty->fb_x -= clip_left;
2419 			dirty->fb_y -= clip_top;
2420 
2421 			dirty->clip(dirty);
2422 		}
2423 
2424 		dirty->fifo_commit(dirty);
2425 	}
2426 
2427 	return 0;
2428 }
2429 
2430 /**
2431  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2432  * cleanup and fencing
2433  * @dev_priv: Pointer to the device-private struct
2434  * @file_priv: Pointer identifying the client when user-space fencing is used
2435  * @ctx: Pointer to the validation context
2436  * @out_fence: If non-NULL, returned refcounted fence-pointer
2437  * @user_fence_rep: If non-NULL, pointer to user-space address area
2438  * in which to copy user-space fence info
2439  */
vmw_kms_helper_validation_finish(struct vmw_private * dev_priv,struct drm_file * file_priv,struct vmw_validation_context * ctx,struct vmw_fence_obj ** out_fence,struct drm_vmw_fence_rep __user * user_fence_rep)2440 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2441 				      struct drm_file *file_priv,
2442 				      struct vmw_validation_context *ctx,
2443 				      struct vmw_fence_obj **out_fence,
2444 				      struct drm_vmw_fence_rep __user *
2445 				      user_fence_rep)
2446 {
2447 	struct vmw_fence_obj *fence = NULL;
2448 	uint32_t handle = 0;
2449 	int ret = 0;
2450 
2451 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2452 	    out_fence)
2453 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2454 						 file_priv ? &handle : NULL);
2455 	vmw_validation_done(ctx, fence);
2456 	if (file_priv)
2457 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2458 					    ret, user_fence_rep, fence,
2459 					    handle, -1);
2460 	if (out_fence)
2461 		*out_fence = fence;
2462 	else
2463 		vmw_fence_obj_unreference(&fence);
2464 }
2465 
2466 /**
2467  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2468  * property.
2469  *
2470  * @dev_priv: Pointer to a device private struct.
2471  *
2472  * Sets up the implicit placement property unless it's already set up.
2473  */
2474 void
vmw_kms_create_implicit_placement_property(struct vmw_private * dev_priv)2475 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2476 {
2477 	if (dev_priv->implicit_placement_property)
2478 		return;
2479 
2480 	dev_priv->implicit_placement_property =
2481 		drm_property_create_range(&dev_priv->drm,
2482 					  DRM_MODE_PROP_IMMUTABLE,
2483 					  "implicit_placement", 0, 1);
2484 }
2485 
2486 /**
2487  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2488  *
2489  * @dev: Pointer to the drm device
2490  * Return: 0 on success. Negative error code on failure.
2491  */
vmw_kms_suspend(struct drm_device * dev)2492 int vmw_kms_suspend(struct drm_device *dev)
2493 {
2494 	struct vmw_private *dev_priv = vmw_priv(dev);
2495 
2496 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2497 	if (IS_ERR(dev_priv->suspend_state)) {
2498 		int ret = PTR_ERR(dev_priv->suspend_state);
2499 
2500 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2501 		dev_priv->suspend_state = NULL;
2502 
2503 		return ret;
2504 	}
2505 
2506 	return 0;
2507 }
2508 
2509 
2510 /**
2511  * vmw_kms_resume - Re-enable modesetting and restore state
2512  *
2513  * @dev: Pointer to the drm device
2514  * Return: 0 on success. Negative error code on failure.
2515  *
2516  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2517  * to call this function without a previous vmw_kms_suspend().
2518  */
vmw_kms_resume(struct drm_device * dev)2519 int vmw_kms_resume(struct drm_device *dev)
2520 {
2521 	struct vmw_private *dev_priv = vmw_priv(dev);
2522 	int ret;
2523 
2524 	if (WARN_ON(!dev_priv->suspend_state))
2525 		return 0;
2526 
2527 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2528 	dev_priv->suspend_state = NULL;
2529 
2530 	return ret;
2531 }
2532 
2533 /**
2534  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2535  *
2536  * @dev: Pointer to the drm device
2537  */
vmw_kms_lost_device(struct drm_device * dev)2538 void vmw_kms_lost_device(struct drm_device *dev)
2539 {
2540 	drm_atomic_helper_shutdown(dev);
2541 }
2542 
2543 /**
2544  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2545  * @update: The closure structure.
2546  *
2547  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2548  * update on display unit.
2549  *
2550  * Return: 0 on success or a negative error code on failure.
2551  */
vmw_du_helper_plane_update(struct vmw_du_update_plane * update)2552 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2553 {
2554 	struct drm_plane_state *state = update->plane->state;
2555 	struct drm_plane_state *old_state = update->old_state;
2556 	struct drm_atomic_helper_damage_iter iter;
2557 	struct drm_rect clip;
2558 	struct drm_rect bb;
2559 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2560 	uint32_t reserved_size = 0;
2561 	uint32_t submit_size = 0;
2562 	uint32_t curr_size = 0;
2563 	uint32_t num_hits = 0;
2564 	void *cmd_start;
2565 	char *cmd_next;
2566 	int ret;
2567 
2568 	/*
2569 	 * Iterate in advance to check if really need plane update and find the
2570 	 * number of clips that actually are in plane src for fifo allocation.
2571 	 */
2572 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2573 	drm_atomic_for_each_plane_damage(&iter, &clip)
2574 		num_hits++;
2575 
2576 	if (num_hits == 0)
2577 		return 0;
2578 
2579 	if (update->vfb->bo) {
2580 		struct vmw_framebuffer_bo *vfbbo =
2581 			container_of(update->vfb, typeof(*vfbbo), base);
2582 
2583 		/*
2584 		 * For screen targets we want a mappable bo, for everything else we want
2585 		 * accelerated i.e. host backed (vram or gmr) bo. If the display unit
2586 		 * is not screen target then mob's shouldn't be available.
2587 		 */
2588 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2589 			vmw_bo_placement_set(vfbbo->buffer,
2590 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2591 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2592 		} else {
2593 			WARN_ON(update->dev_priv->has_mob);
2594 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2595 		}
2596 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2597 	} else {
2598 		struct vmw_framebuffer_surface *vfbs =
2599 			container_of(update->vfb, typeof(*vfbs), base);
2600 		struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
2601 
2602 		ret = vmw_validation_add_resource(&val_ctx, &surf->res,
2603 						  0, VMW_RES_DIRTY_NONE, NULL,
2604 						  NULL);
2605 	}
2606 
2607 	if (ret)
2608 		return ret;
2609 
2610 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2611 	if (ret)
2612 		goto out_unref;
2613 
2614 	reserved_size = update->calc_fifo_size(update, num_hits);
2615 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2616 	if (!cmd_start) {
2617 		ret = -ENOMEM;
2618 		goto out_revert;
2619 	}
2620 
2621 	cmd_next = cmd_start;
2622 
2623 	if (update->post_prepare) {
2624 		curr_size = update->post_prepare(update, cmd_next);
2625 		cmd_next += curr_size;
2626 		submit_size += curr_size;
2627 	}
2628 
2629 	if (update->pre_clip) {
2630 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2631 		cmd_next += curr_size;
2632 		submit_size += curr_size;
2633 	}
2634 
2635 	bb.x1 = INT_MAX;
2636 	bb.y1 = INT_MAX;
2637 	bb.x2 = INT_MIN;
2638 	bb.y2 = INT_MIN;
2639 
2640 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2641 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2642 		uint32_t fb_x = clip.x1;
2643 		uint32_t fb_y = clip.y1;
2644 
2645 		vmw_du_translate_to_crtc(state, &clip);
2646 		if (update->clip) {
2647 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2648 						 fb_y);
2649 			cmd_next += curr_size;
2650 			submit_size += curr_size;
2651 		}
2652 		bb.x1 = min_t(int, bb.x1, clip.x1);
2653 		bb.y1 = min_t(int, bb.y1, clip.y1);
2654 		bb.x2 = max_t(int, bb.x2, clip.x2);
2655 		bb.y2 = max_t(int, bb.y2, clip.y2);
2656 	}
2657 
2658 	curr_size = update->post_clip(update, cmd_next, &bb);
2659 	submit_size += curr_size;
2660 
2661 	if (reserved_size < submit_size)
2662 		submit_size = 0;
2663 
2664 	vmw_cmd_commit(update->dev_priv, submit_size);
2665 
2666 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2667 					 update->out_fence, NULL);
2668 	return ret;
2669 
2670 out_revert:
2671 	vmw_validation_revert(&val_ctx);
2672 
2673 out_unref:
2674 	vmw_validation_unref_lists(&val_ctx);
2675 	return ret;
2676 }
2677 
2678 /**
2679  * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2680  *
2681  * @connector: the drm connector, part of a DU container
2682  * @mode: drm mode to check
2683  *
2684  * Returns MODE_OK on success, or a drm_mode_status error code.
2685  */
vmw_connector_mode_valid(struct drm_connector * connector,struct drm_display_mode * mode)2686 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2687 					      struct drm_display_mode *mode)
2688 {
2689 	enum drm_mode_status ret;
2690 	struct drm_device *dev = connector->dev;
2691 	struct vmw_private *dev_priv = vmw_priv(dev);
2692 	u32 assumed_cpp = 4;
2693 
2694 	if (dev_priv->assume_16bpp)
2695 		assumed_cpp = 2;
2696 
2697 	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
2698 				     dev_priv->texture_max_height);
2699 	if (ret != MODE_OK)
2700 		return ret;
2701 
2702 	if (!vmw_kms_validate_mode_vram(dev_priv,
2703 					mode->hdisplay * assumed_cpp,
2704 					mode->vdisplay))
2705 		return MODE_MEM;
2706 
2707 	return MODE_OK;
2708 }
2709 
2710 /**
2711  * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2712  *
2713  * @connector: the drm connector, part of a DU container
2714  *
2715  * Returns the number of added modes.
2716  */
vmw_connector_get_modes(struct drm_connector * connector)2717 int vmw_connector_get_modes(struct drm_connector *connector)
2718 {
2719 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2720 	struct drm_device *dev = connector->dev;
2721 	struct vmw_private *dev_priv = vmw_priv(dev);
2722 	struct drm_display_mode *mode = NULL;
2723 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2724 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2725 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2726 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2727 	};
2728 	u32 max_width;
2729 	u32 max_height;
2730 	u32 num_modes;
2731 
2732 	/* Add preferred mode */
2733 	mode = drm_mode_duplicate(dev, &prefmode);
2734 	if (!mode)
2735 		return 0;
2736 
2737 	mode->hdisplay = du->pref_width;
2738 	mode->vdisplay = du->pref_height;
2739 	vmw_guess_mode_timing(mode);
2740 	drm_mode_set_name(mode);
2741 
2742 	drm_mode_probed_add(connector, mode);
2743 	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2744 
2745 	/* Probe connector for all modes not exceeding our geom limits */
2746 	max_width  = dev_priv->texture_max_width;
2747 	max_height = dev_priv->texture_max_height;
2748 
2749 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2750 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2751 		max_height = min(dev_priv->stdu_max_height, max_height);
2752 	}
2753 
2754 	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2755 
2756 	return num_modes;
2757 }
2758 
vmw_user_object_ref(struct vmw_user_object * uo)2759 struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
2760 {
2761 	if (uo->buffer)
2762 		vmw_user_bo_ref(uo->buffer);
2763 	else if (uo->surface)
2764 		vmw_surface_reference(uo->surface);
2765 	return uo;
2766 }
2767 
vmw_user_object_unref(struct vmw_user_object * uo)2768 void vmw_user_object_unref(struct vmw_user_object *uo)
2769 {
2770 	if (uo->buffer)
2771 		vmw_user_bo_unref(&uo->buffer);
2772 	else if (uo->surface)
2773 		vmw_surface_unreference(&uo->surface);
2774 }
2775 
2776 struct vmw_bo *
vmw_user_object_buffer(struct vmw_user_object * uo)2777 vmw_user_object_buffer(struct vmw_user_object *uo)
2778 {
2779 	if (uo->buffer)
2780 		return uo->buffer;
2781 	else if (uo->surface)
2782 		return uo->surface->res.guest_memory_bo;
2783 	return NULL;
2784 }
2785 
2786 struct vmw_surface *
vmw_user_object_surface(struct vmw_user_object * uo)2787 vmw_user_object_surface(struct vmw_user_object *uo)
2788 {
2789 	if (uo->buffer)
2790 		return uo->buffer->dumb_surface;
2791 	return uo->surface;
2792 }
2793 
vmw_user_object_map(struct vmw_user_object * uo)2794 void *vmw_user_object_map(struct vmw_user_object *uo)
2795 {
2796 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2797 
2798 	WARN_ON(!bo);
2799 	return vmw_bo_map_and_cache(bo);
2800 }
2801 
vmw_user_object_map_size(struct vmw_user_object * uo,size_t size)2802 void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
2803 {
2804 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2805 
2806 	WARN_ON(!bo);
2807 	return vmw_bo_map_and_cache_size(bo, size);
2808 }
2809 
vmw_user_object_unmap(struct vmw_user_object * uo)2810 void vmw_user_object_unmap(struct vmw_user_object *uo)
2811 {
2812 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2813 	int ret;
2814 
2815 	WARN_ON(!bo);
2816 
2817 	/* Fence the mob creation so we are guarateed to have the mob */
2818 	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
2819 	if (ret != 0)
2820 		return;
2821 
2822 	vmw_bo_unmap(bo);
2823 	vmw_bo_pin_reserved(bo, false);
2824 
2825 	ttm_bo_unreserve(&bo->tbo);
2826 }
2827 
vmw_user_object_is_mapped(struct vmw_user_object * uo)2828 bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
2829 {
2830 	struct vmw_bo *bo;
2831 
2832 	if (!uo || vmw_user_object_is_null(uo))
2833 		return false;
2834 
2835 	bo = vmw_user_object_buffer(uo);
2836 
2837 	if (WARN_ON(!bo))
2838 		return false;
2839 
2840 	WARN_ON(bo->map.bo && !bo->map.virtual);
2841 	return bo->map.virtual;
2842 }
2843 
vmw_user_object_is_null(struct vmw_user_object * uo)2844 bool vmw_user_object_is_null(struct vmw_user_object *uo)
2845 {
2846 	return !uo->buffer && !uo->surface;
2847 }
2848