xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include "vmwgfx_kms.h"
28 
29 #include "vmwgfx_bo.h"
30 #include "vmwgfx_vkms.h"
31 #include "vmw_surface_cache.h"
32 
33 #include <drm/drm_atomic.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_damage_helper.h>
36 #include <drm/drm_fourcc.h>
37 #include <drm/drm_rect.h>
38 #include <drm/drm_sysfs.h>
39 #include <drm/drm_edid.h>
40 
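/**
 * vmw_du_init - Initialize display-unit-common state
 * @du: The display unit
 *
 * Currently this only initializes the vkms state of the unit's crtc.
 */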
41 void vmw_du_init(struct vmw_display_unit *du)
42 {
43 	vmw_vkms_crtc_init(&du->crtc);
44 }
45 
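/**
 * vmw_du_cleanup - Tear down a display unit
 * @du: The display unit
 *
 * Cleans up the vkms state, the primary plane, the cursor plane (when
 * command buffers are supported), and the connector, crtc and encoder
 * belonging to @du.
 */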
46 void vmw_du_cleanup(struct vmw_display_unit *du)
47 {
48 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
49 
50 	vmw_vkms_crtc_cleanup(&du->crtc);
51 	drm_plane_cleanup(&du->primary);
52 	if (vmw_cmd_supported(dev_priv))
53 		drm_plane_cleanup(&du->cursor.base);
54 
55 	drm_connector_unregister(&du->connector);
56 	drm_crtc_cleanup(&du->crtc);
57 	drm_encoder_cleanup(&du->encoder);
58 	drm_connector_cleanup(&du->connector);
59 }
60 
61 /*
62  * Display Unit Cursor functions
63  */
64 
65 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
66 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
67 				  struct vmw_plane_state *vps,
68 				  u32 *image, u32 width, u32 height,
69 				  u32 hotspotX, u32 hotspotY);
70 
71 struct vmw_svga_fifo_cmd_define_cursor {
72 	u32 cmd;
73 	SVGAFifoCmdDefineAlphaCursor cursor;
74 };
75 
76 /**
77  * vmw_send_define_cursor_cmd - queue a define cursor command
78  * @dev_priv: the private driver struct
79  * @image: buffer which holds the cursor image
80  * @width: width of the mouse cursor image
81  * @height: height of the mouse cursor image
82  * @hotspotX: the horizontal position of mouse hotspot
83  * @hotspotY: the vertical position of mouse hotspot
84  */
85 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
86 				       u32 *image, u32 width, u32 height,
87 				       u32 hotspotX, u32 hotspotY)
88 {
89 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
90 	const u32 image_size = width * height * sizeof(*image);
91 	const u32 cmd_size = sizeof(*cmd) + image_size;
92 
93 	/* Try to reserve fifocmd space and swallow any failures;
94 	   such reservations cannot be left unconsumed for long
95 	   at the risk of clogging other fifocmd users, so
96 	   we treat reservations separately from the way we treat
97 	   other fallible KMS-atomic resources at prepare_fb */
98 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
99 
100 	if (unlikely(!cmd))
101 		return;
102 
103 	memset(cmd, 0, sizeof(*cmd));
104 
105 	memcpy(&cmd[1], image, image_size);
106 
107 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
108 	cmd->cursor.id = 0;
109 	cmd->cursor.width = width;
110 	cmd->cursor.height = height;
111 	cmd->cursor.hotspotX = hotspotX;
112 	cmd->cursor.hotspotY = hotspotY;
113 
114 	vmw_cmd_commit_flush(dev_priv, cmd_size);
115 }
116 
117 /**
118  * vmw_cursor_update_image - update the cursor image on the provided plane
119  * @dev_priv: the private driver struct
120  * @vps: the plane state of the cursor plane
121  * @image: buffer which holds the cursor image
122  * @width: width of the mouse cursor image
123  * @height: height of the mouse cursor image
124  * @hotspotX: the horizontal position of mouse hotspot
125  * @hotspotY: the vertical position of mouse hotspot
126  */
127 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
128 				    struct vmw_plane_state *vps,
129 				    u32 *image, u32 width, u32 height,
130 				    u32 hotspotX, u32 hotspotY)
131 {
132 	if (vps->cursor.bo)
133 		vmw_cursor_update_mob(dev_priv, vps, image,
134 				      vps->base.crtc_w, vps->base.crtc_h,
135 				      hotspotX, hotspotY);
136 
137 	else
138 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
139 					   hotspotX, hotspotY);
140 }
141 
142 
143 /**
144  * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
145  *
146  * Called from inside vmw_du_cursor_plane_atomic_update to actually
147  * make the cursor-image live.
148  *
149  * @dev_priv: device to work with
150  * @vps: the plane state of the cursor plane
151  * @image: cursor source data to fill the MOB with
152  * @width: source data width
153  * @height: source data height
154  * @hotspotX: cursor hotspot x
155  * @hotspotY: cursor hotspot Y
156  */
157 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
158 				  struct vmw_plane_state *vps,
159 				  u32 *image, u32 width, u32 height,
160 				  u32 hotspotX, u32 hotspotY)
161 {
162 	SVGAGBCursorHeader *header;
163 	SVGAGBAlphaCursorHeader *alpha_header;
164 	const u32 image_size = width * height * sizeof(*image);
165 
166 	header = vmw_bo_map_and_cache(vps->cursor.bo);
167 	alpha_header = &header->header.alphaHeader;
168 
169 	memset(header, 0, sizeof(*header));
170 
171 	header->type = SVGA_ALPHA_CURSOR;
172 	header->sizeInBytes = image_size;
173 
174 	alpha_header->hotspotX = hotspotX;
175 	alpha_header->hotspotY = hotspotY;
176 	alpha_header->width = width;
177 	alpha_header->height = height;
178 
179 	memcpy(header + 1, image, image_size);
180 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
181 		  vps->cursor.bo->tbo.resource->start);
182 }
183 
184 
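/**
 * vmw_du_cursor_mob_size - Size in bytes of a cursor MOB
 * @w: cursor width in pixels
 * @h: cursor height in pixels
 *
 * Returns the size of a 32-bpp cursor image of @w x @h pixels plus the
 * SVGAGBCursorHeader that precedes it in the MOB.
 */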
185 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
186 {
187 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
188 }
189 
190 /**
191  * vmw_du_cursor_plane_acquire_image - Acquire the image data
192  * @vps: cursor plane state
193  */
194 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
195 {
196 	if (vps->surf) {
197 		if (vps->surf_mapped)
198 			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
199 		return vps->surf->snooper.image;
200 	} else if (vps->bo)
201 		return vmw_bo_map_and_cache(vps->bo);
202 	return NULL;
203 }
204 
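/**
 * vmw_du_cursor_plane_has_changed - Check whether the cursor needs re-upload
 * @old_vps: previous cursor plane state
 * @new_vps: new cursor plane state
 *
 * Returns true if the cursor dimensions, hotspot or image contents differ
 * between the two states, false otherwise.
 */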
205 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
206 					    struct vmw_plane_state *new_vps)
207 {
208 	void *old_image;
209 	void *new_image;
210 	u32 size;
211 	bool changed;
212 
213 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
214 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
215 	    return true;
216 
217 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
218 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
219 	    return true;
220 
221 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
222 
223 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
224 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
225 
226 	changed = false;
227 	if (old_image && new_image)
228 		changed = memcmp(old_image, new_image, size) != 0;
229 
230 	return changed;
231 }
232 
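/**
 * vmw_du_destroy_cursor_mob - Unpin and release a cursor MOB
 * @vbo: pointer to the buffer object pointer, cleared on return
 */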
233 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
234 {
235 	if (!(*vbo))
236 		return;
237 
238 	ttm_bo_unpin(&(*vbo)->tbo);
239 	vmw_bo_unreference(vbo);
240 }
241 
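/**
 * vmw_du_put_cursor_mob - Return a cursor MOB to the per-plane cache
 * @vcp: the cursor plane
 * @vps: the plane state holding the MOB
 *
 * Unmaps the MOB and either stashes it in a free cache slot, replaces a
 * smaller cached MOB, or destroys it if caching it is not worthwhile.
 */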
242 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
243 				  struct vmw_plane_state *vps)
244 {
245 	u32 i;
246 
247 	if (!vps->cursor.bo)
248 		return;
249 
250 	vmw_du_cursor_plane_unmap_cm(vps);
251 
252 	/* Look for a free slot to return this mob to the cache. */
253 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
254 		if (!vcp->cursor_mobs[i]) {
255 			vcp->cursor_mobs[i] = vps->cursor.bo;
256 			vps->cursor.bo = NULL;
257 			return;
258 		}
259 	}
260 
261 	/* Cache is full: See if this mob is bigger than an existing mob. */
262 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
263 		if (vcp->cursor_mobs[i]->tbo.base.size <
264 		    vps->cursor.bo->tbo.base.size) {
265 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
266 			vcp->cursor_mobs[i] = vps->cursor.bo;
267 			vps->cursor.bo = NULL;
268 			return;
269 		}
270 	}
271 
272 	/* Destroy it if it's not worth caching. */
273 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
274 }
275 
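/**
 * vmw_du_get_cursor_mob - Acquire a cursor MOB for the plane state
 * @vcp: the cursor plane
 * @vps: the plane state that will own the MOB
 *
 * Reuses the MOB already attached to @vps, or one from the plane's cache if
 * it is large enough; otherwise creates and fences a new one.
 *
 * Returns 0 on success, negative error code otherwise.
 */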
276 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
277 				 struct vmw_plane_state *vps)
278 {
279 	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
280 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
281 	u32 i;
282 	u32 cursor_max_dim, mob_max_size;
283 	struct vmw_fence_obj *fence = NULL;
284 	int ret;
285 
286 	if (!dev_priv->has_mob ||
287 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
288 		return -EINVAL;
289 
290 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
291 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
292 
293 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
294 	    vps->base.crtc_h > cursor_max_dim)
295 		return -EINVAL;
296 
297 	if (vps->cursor.bo) {
298 		if (vps->cursor.bo->tbo.base.size >= size)
299 			return 0;
300 		vmw_du_put_cursor_mob(vcp, vps);
301 	}
302 
303 	/* Look for an unused mob in the cache. */
304 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
305 		if (vcp->cursor_mobs[i] &&
306 		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
307 			vps->cursor.bo = vcp->cursor_mobs[i];
308 			vcp->cursor_mobs[i] = NULL;
309 			return 0;
310 		}
311 	}
312 	/* Create a new mob if we can't find an existing one. */
313 	ret = vmw_bo_create_and_populate(dev_priv, size,
314 					 VMW_BO_DOMAIN_MOB,
315 					 &vps->cursor.bo);
316 
317 	if (ret != 0)
318 		return ret;
319 
320 	/* Fence the mob creation so we are guaranteed to have the mob */
321 	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
322 	if (ret != 0)
323 		goto teardown;
324 
325 	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
326 	if (ret != 0) {
327 		ttm_bo_unreserve(&vps->cursor.bo->tbo);
328 		goto teardown;
329 	}
330 
331 	dma_fence_wait(&fence->base, false);
332 	dma_fence_put(&fence->base);
333 
334 	ttm_bo_unreserve(&vps->cursor.bo->tbo);
335 	return 0;
336 
337 teardown:
338 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
339 	return ret;
340 }
341 
342 
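/**
 * vmw_cursor_update_position - Move or hide the hardware cursor
 * @dev_priv: the private driver struct
 * @show: whether the cursor should be shown
 * @x: cursor x position
 * @y: cursor y position
 *
 * Uses the CURSOR4 registers, the cursor-bypass-3 FIFO registers or the
 * legacy cursor registers, depending on device capabilities.
 */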
343 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
344 				       bool show, int x, int y)
345 {
346 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
347 					     : SVGA_CURSOR_ON_HIDE;
348 	uint32_t count;
349 
350 	spin_lock(&dev_priv->cursor_lock);
351 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
352 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
353 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
354 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
355 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
356 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
357 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
358 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
359 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
360 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
361 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
362 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
363 	} else {
364 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
365 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
366 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
367 	}
368 	spin_unlock(&dev_priv->cursor_lock);
369 }
370 
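/**
 * vmw_kms_cursor_snoop - Snoop a cursor surface DMA command
 * @srf: the surface used as the cursor image
 * @tfile: pointer to the struct ttm_object_file of the caller
 * @bo: the guest buffer object backing the DMA
 * @header: header of the SVGA3dCmdSurfaceDMA command to snoop
 *
 * Copies the cursor image from the guest buffer into @srf's snooper image so
 * the cursor can later be redefined without reading back from the device.
 */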
371 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
372 			  struct ttm_object_file *tfile,
373 			  struct ttm_buffer_object *bo,
374 			  SVGA3dCmdHeader *header)
375 {
376 	struct ttm_bo_kmap_obj map;
377 	unsigned long kmap_offset;
378 	unsigned long kmap_num;
379 	SVGA3dCopyBox *box;
380 	unsigned box_count;
381 	void *virtual;
382 	bool is_iomem;
383 	struct vmw_dma_cmd {
384 		SVGA3dCmdHeader header;
385 		SVGA3dCmdSurfaceDMA dma;
386 	} *cmd;
387 	int i, ret;
388 	const struct SVGA3dSurfaceDesc *desc =
389 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
390 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
391 
392 	cmd = container_of(header, struct vmw_dma_cmd, header);
393 
394 	/* No snooper installed, nothing to copy */
395 	if (!srf->snooper.image)
396 		return;
397 
398 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
399 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
400 		return;
401 	}
402 
403 	if (cmd->header.size < 64) {
404 		DRM_ERROR("at least one full copy box must be given\n");
405 		return;
406 	}
407 
408 	box = (SVGA3dCopyBox *)&cmd[1];
409 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
410 			sizeof(SVGA3dCopyBox);
411 
412 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
413 	    box->x != 0    || box->y != 0    || box->z != 0    ||
414 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
415 	    box->d != 1    || box_count != 1 ||
416 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
417 		/* TODO handle non-page-aligned offsets */
418 		/* TODO handle more dst & src != 0 */
419 		/* TODO handle more than one copy */
420 		DRM_ERROR("Can't snoop dma request for cursor!\n");
421 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
422 			  box->srcx, box->srcy, box->srcz,
423 			  box->x, box->y, box->z,
424 			  box->w, box->h, box->d, box_count,
425 			  cmd->dma.guest.ptr.offset);
426 		return;
427 	}
428 
429 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
430 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
431 
432 	ret = ttm_bo_reserve(bo, true, false, NULL);
433 	if (unlikely(ret != 0)) {
434 		DRM_ERROR("reserve failed\n");
435 		return;
436 	}
437 
438 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
439 	if (unlikely(ret != 0))
440 		goto err_unreserve;
441 
442 	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
443 
444 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
445 		memcpy(srf->snooper.image, virtual,
446 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
447 	} else {
448 		/* Image is unsigned pointer. */
449 		for (i = 0; i < box->h; i++)
450 			memcpy(srf->snooper.image + i * image_pitch,
451 			       virtual + i * cmd->dma.guest.pitch,
452 			       box->w * desc->pitchBytesPerBlock);
453 	}
454 
455 	srf->snooper.age++;
456 
457 	ttm_bo_kunmap(&map);
458 err_unreserve:
459 	ttm_bo_unreserve(bo);
460 }
461 
462 /**
463  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
464  *
465  * @dev_priv: Pointer to the device private struct.
466  *
467  * Clears all legacy hotspots.
468  */
469 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
470 {
471 	struct drm_device *dev = &dev_priv->drm;
472 	struct vmw_display_unit *du;
473 	struct drm_crtc *crtc;
474 
475 	drm_modeset_lock_all(dev);
476 	drm_for_each_crtc(crtc, dev) {
477 		du = vmw_crtc_to_du(crtc);
478 
479 		du->hotspot_x = 0;
480 		du->hotspot_y = 0;
481 	}
482 	drm_modeset_unlock_all(dev);
483 }
484 
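/**
 * vmw_kms_cursor_post_execbuf - Re-emit cursor definitions after command
 * submission
 * @dev_priv: the private driver struct
 *
 * For every crtc whose cursor surface was updated by snooping, sends a new
 * define-cursor command with the snooped image and the current hotspot.
 */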
485 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
486 {
487 	struct drm_device *dev = &dev_priv->drm;
488 	struct vmw_display_unit *du;
489 	struct drm_crtc *crtc;
490 
491 	mutex_lock(&dev->mode_config.mutex);
492 
493 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
494 		du = vmw_crtc_to_du(crtc);
495 		if (!du->cursor_surface ||
496 		    du->cursor_age == du->cursor_surface->snooper.age ||
497 		    !du->cursor_surface->snooper.image)
498 			continue;
499 
500 		du->cursor_age = du->cursor_surface->snooper.age;
501 		vmw_send_define_cursor_cmd(dev_priv,
502 					   du->cursor_surface->snooper.image,
503 					   VMW_CURSOR_SNOOP_WIDTH,
504 					   VMW_CURSOR_SNOOP_HEIGHT,
505 					   du->hotspot_x + du->core_hotspot_x,
506 					   du->hotspot_y + du->core_hotspot_y);
507 	}
508 
509 	mutex_unlock(&dev->mode_config.mutex);
510 }
511 
512 
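/**
 * vmw_du_cursor_plane_destroy - Destroy the cursor plane
 * @plane: the cursor plane
 *
 * Hides the hardware cursor, frees any cached cursor MOBs and cleans up the
 * plane.
 */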
513 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
514 {
515 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
516 	u32 i;
517 
518 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
519 
520 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
521 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
522 
523 	drm_plane_cleanup(plane);
524 }
525 
526 
527 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
528 {
529 	drm_plane_cleanup(plane);
530 
531 	/* Planes are static in our case so we don't free them */
532 }
533 
534 
535 /**
536  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
537  *
538  * @vps: plane state associated with the display surface
539  * @unreference: true if we also want to unreference the display surface.
540  */
541 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
542 			     bool unreference)
543 {
544 	if (vps->surf) {
545 		if (vps->pinned) {
546 			vmw_resource_unpin(&vps->surf->res);
547 			vps->pinned--;
548 		}
549 
550 		if (unreference) {
551 			if (vps->pinned)
552 				DRM_ERROR("Surface still pinned\n");
553 			vmw_surface_unreference(&vps->surf);
554 		}
555 	}
556 }
557 
558 
559 /**
560  * vmw_du_plane_cleanup_fb - Unpins the plane surface
561  *
562  * @plane:  display plane
563  * @old_state: Contains the FB to clean up
564  *
565  * Unpins the framebuffer surface
568  */
569 void
570 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
571 			struct drm_plane_state *old_state)
572 {
573 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
574 
575 	vmw_du_plane_unpin_surf(vps, false);
576 }
577 
578 
579 /**
580  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
581  *
582  * @vps: plane_state
583  *
584  * Returns 0 on success
585  */
586 
587 static int
588 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
589 {
590 	int ret;
591 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
592 	struct ttm_buffer_object *bo;
593 
594 	if (!vps->cursor.bo)
595 		return -EINVAL;
596 
597 	bo = &vps->cursor.bo->tbo;
598 
599 	if (bo->base.size < size)
600 		return -EINVAL;
601 
602 	if (vps->cursor.bo->map.virtual)
603 		return 0;
604 
605 	ret = ttm_bo_reserve(bo, false, false, NULL);
606 	if (unlikely(ret != 0))
607 		return -ENOMEM;
608 
609 	vmw_bo_map_and_cache(vps->cursor.bo);
610 
611 	ttm_bo_unreserve(bo);
615 
616 	return 0;
617 }
618 
619 
620 /**
621  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
622  *
623  * @vps: state of the cursor plane
624  *
625  * Returns 0 on success
626  */
627 
628 static int
629 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
630 {
631 	int ret = 0;
632 	struct vmw_bo *vbo = vps->cursor.bo;
633 
634 	if (!vbo || !vbo->map.virtual)
635 		return 0;
636 
637 	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
638 	if (likely(ret == 0)) {
639 		vmw_bo_unmap(vbo);
640 		ttm_bo_unreserve(&vbo->tbo);
641 	}
642 
643 	return ret;
644 }
645 
646 
647 /**
648  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
649  *
650  * @plane: cursor plane
651  * @old_state: contains the state to clean up
652  *
653  * Unmaps all cursor bo mappings and unpins the cursor surface
656  */
657 void
658 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
659 			       struct drm_plane_state *old_state)
660 {
661 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
662 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
663 
664 	if (vps->surf_mapped) {
665 		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
666 		vps->surf_mapped = false;
667 	}
668 
669 	vmw_du_cursor_plane_unmap_cm(vps);
670 	vmw_du_put_cursor_mob(vcp, vps);
671 
672 	vmw_du_plane_unpin_surf(vps, false);
673 
674 	if (vps->surf) {
675 		vmw_surface_unreference(&vps->surf);
676 		vps->surf = NULL;
677 	}
678 
679 	if (vps->bo) {
680 		vmw_bo_unreference(&vps->bo);
681 		vps->bo = NULL;
682 	}
683 }
684 
685 
686 /**
687  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
688  *
689  * @plane:  display plane
690  * @new_state: info on the new plane state, including the FB
691  *
692  * Returns 0 on success
693  */
694 int
695 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
696 			       struct drm_plane_state *new_state)
697 {
698 	struct drm_framebuffer *fb = new_state->fb;
699 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
700 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
701 	int ret = 0;
702 
703 	if (vps->surf) {
704 		if (vps->surf_mapped) {
705 			vmw_bo_unmap(vps->surf->res.guest_memory_bo);
706 			vps->surf_mapped = false;
707 		}
708 		vmw_surface_unreference(&vps->surf);
709 		vps->surf = NULL;
710 	}
711 
712 	if (vps->bo) {
713 		vmw_bo_unreference(&vps->bo);
714 		vps->bo = NULL;
715 	}
716 
717 	if (fb) {
718 		if (vmw_framebuffer_to_vfb(fb)->bo) {
719 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
720 			vmw_bo_reference(vps->bo);
721 		} else {
722 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
723 			vmw_surface_reference(vps->surf);
724 		}
725 	}
726 
727 	if (!vps->surf && vps->bo) {
728 		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
729 
730 		/*
731 		 * Not using vmw_bo_map_and_cache() helper here as we need to
732 		 * reserve the ttm_buffer_object first which
733 		 * vmw_bo_map_and_cache() omits.
734 		 */
735 		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
736 
737 		if (unlikely(ret != 0))
738 			return -ENOMEM;
739 
740 		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
741 
742 		ttm_bo_unreserve(&vps->bo->tbo);
743 
744 		if (unlikely(ret != 0))
745 			return -ENOMEM;
746 	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
747 
748 		WARN_ON(vps->surf->snooper.image);
749 		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
750 				     NULL);
751 		if (unlikely(ret != 0))
752 			return -ENOMEM;
753 		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
754 		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
755 		vps->surf_mapped = true;
756 	}
757 
758 	if (vps->surf || vps->bo) {
759 		vmw_du_get_cursor_mob(vcp, vps);
760 		vmw_du_cursor_plane_map_cm(vps);
761 	}
762 
763 	return 0;
764 }
765 
766 
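/**
 * vmw_du_cursor_plane_atomic_update - Commit new cursor state to the device
 * @plane: the cursor plane
 * @state: the atomic state containing the new and old plane states
 *
 * Uploads a new cursor image when it has changed, keeps the previously
 * programmed one otherwise, and updates the cursor position and visibility.
 */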
767 void
768 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
769 				  struct drm_atomic_state *state)
770 {
771 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
772 									   plane);
773 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
774 									   plane);
775 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
776 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
777 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
778 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
779 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
780 	s32 hotspot_x, hotspot_y;
781 
782 	hotspot_x = du->hotspot_x + new_state->hotspot_x;
783 	hotspot_y = du->hotspot_y + new_state->hotspot_y;
784 
785 	du->cursor_surface = vps->surf;
786 
787 	if (!vps->surf && !vps->bo) {
788 		vmw_cursor_update_position(dev_priv, false, 0, 0);
789 		return;
790 	}
791 
792 	vps->cursor.hotspot_x = hotspot_x;
793 	vps->cursor.hotspot_y = hotspot_y;
794 
795 	if (vps->surf) {
796 		du->cursor_age = du->cursor_surface->snooper.age;
797 	}
798 
799 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
800 		/*
801 		 * If it hasn't changed, avoid making the device do extra
802 		 * work by keeping the old cursor active.
803 		 */
804 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
805 		old_vps->cursor = vps->cursor;
806 		vps->cursor = tmp;
807 	} else {
808 		void *image = vmw_du_cursor_plane_acquire_image(vps);
809 		if (image)
810 			vmw_cursor_update_image(dev_priv, vps, image,
811 						new_state->crtc_w,
812 						new_state->crtc_h,
813 						hotspot_x, hotspot_y);
814 	}
815 
816 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
817 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
818 
819 	vmw_cursor_update_position(dev_priv, true,
820 				   du->cursor_x + hotspot_x,
821 				   du->cursor_y + hotspot_y);
822 
823 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
824 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
825 }
826 
827 
828 /**
829  * vmw_du_primary_plane_atomic_check - check if the new state is okay
830  *
831  * @plane: display plane
832  * @state: info on the new plane state, including the FB
833  *
834  * Check if the new state is settable given the current state.  Other
835  * than what the atomic helper checks, we care about crtc fitting
836  * the FB and maintaining one active framebuffer.
837  *
838  * Returns 0 on success
839  */
840 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
841 				      struct drm_atomic_state *state)
842 {
843 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
844 									   plane);
845 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
846 									   plane);
847 	struct drm_crtc_state *crtc_state = NULL;
848 	struct drm_framebuffer *new_fb = new_state->fb;
849 	struct drm_framebuffer *old_fb = old_state->fb;
850 	int ret;
851 
852 	/*
853 	 * Ignore damage clips if the framebuffer attached to the plane's state
854 	 * has changed since the last plane update (page-flip). In this case, a
855 	 * full plane update should happen because uploads are done per-buffer.
856 	 */
857 	if (old_fb != new_fb)
858 		new_state->ignore_damage_clips = true;
859 
860 	if (new_state->crtc)
861 		crtc_state = drm_atomic_get_new_crtc_state(state,
862 							   new_state->crtc);
863 
864 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
865 						  DRM_PLANE_NO_SCALING,
866 						  DRM_PLANE_NO_SCALING,
867 						  false, true);
868 	return ret;
869 }
870 
871 
872 /**
873  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
874  *
875  * @plane: cursor plane
876  * @state: info on the new plane state
877  *
878  * This is a chance to fail if the new cursor state does not fit
879  * our requirements.
880  *
881  * Returns 0 on success
882  */
883 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
884 				     struct drm_atomic_state *state)
885 {
886 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
887 									   plane);
888 	int ret = 0;
889 	struct drm_crtc_state *crtc_state = NULL;
890 	struct vmw_surface *surface = NULL;
891 	struct drm_framebuffer *fb = new_state->fb;
892 
893 	if (new_state->crtc)
894 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
895 							   new_state->crtc);
896 
897 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
898 						  DRM_PLANE_NO_SCALING,
899 						  DRM_PLANE_NO_SCALING,
900 						  true, true);
901 	if (ret)
902 		return ret;
903 
904 	/* Turning off */
905 	if (!fb)
906 		return 0;
907 
908 	/* A lot of the code assumes this */
909 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
910 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
911 			  new_state->crtc_w, new_state->crtc_h);
912 		return -EINVAL;
913 	}
914 
915 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
916 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
917 
918 		WARN_ON(!surface);
919 
920 		if (!surface ||
921 		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
922 			DRM_ERROR("surface not suitable for cursor\n");
923 			return -EINVAL;
924 		}
925 	}
926 
927 	return 0;
928 }
929 
930 
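/**
 * vmw_du_crtc_atomic_check - Validate a new crtc state
 * @crtc: the crtc
 * @state: the atomic state containing the new crtc state
 *
 * Verifies the connector configuration and, since the virtual device has no
 * dot clock, derives one from the logical mode clock.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */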
931 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
932 			     struct drm_atomic_state *state)
933 {
934 	struct vmw_private *vmw = vmw_priv(crtc->dev);
935 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
936 									 crtc);
937 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
938 	int connector_mask = drm_connector_mask(&du->connector);
939 	bool has_primary = new_state->plane_mask &
940 			   drm_plane_mask(crtc->primary);
941 
942 	/*
943 	 * This is fine in general, but broken userspace might expect
944 	 * some actual rendering so give a clue as why it's blank.
945 	 */
946 	if (new_state->enable && !has_primary)
947 		drm_dbg_driver(&vmw->drm,
948 			       "CRTC without a primary plane will be blank.\n");
949 
950 
951 	if (new_state->connector_mask != connector_mask &&
952 	    new_state->connector_mask != 0) {
953 		DRM_ERROR("Invalid connectors configuration\n");
954 		return -EINVAL;
955 	}
956 
957 	/*
958 	 * Our virtual device does not have a dot clock, so use the logical
959 	 * clock value as the dot clock.
960 	 */
961 	if (new_state->mode.crtc_clock == 0)
962 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
963 
964 	return 0;
965 }
966 
967 
968 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
969 			      struct drm_atomic_state *state)
970 {
971 	vmw_vkms_crtc_atomic_begin(crtc, state);
972 }
973 
974 /**
975  * vmw_du_crtc_duplicate_state - duplicate crtc state
976  * @crtc: DRM crtc
977  *
978  * Allocates and returns a copy of the crtc state (both common and
979  * vmw-specific) for the specified crtc.
980  *
981  * Returns: The newly allocated crtc state, or NULL on failure.
982  */
983 struct drm_crtc_state *
984 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
985 {
986 	struct drm_crtc_state *state;
987 	struct vmw_crtc_state *vcs;
988 
989 	if (WARN_ON(!crtc->state))
990 		return NULL;
991 
992 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
993 
994 	if (!vcs)
995 		return NULL;
996 
997 	state = &vcs->base;
998 
999 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1000 
1001 	return state;
1002 }
1003 
1004 
1005 /**
1006  * vmw_du_crtc_reset - creates a blank vmw crtc state
1007  * @crtc: DRM crtc
1008  *
1009  * Resets the atomic state for @crtc by freeing the state pointer (which
1010  * might be NULL, e.g. at driver load time) and allocating a new empty state
1011  * object.
1012  */
1013 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1014 {
1015 	struct vmw_crtc_state *vcs;
1016 
1017 
1018 	if (crtc->state) {
1019 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1020 
1021 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1022 	}
1023 
1024 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1025 
1026 	if (!vcs) {
1027 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1028 		return;
1029 	}
1030 
1031 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1032 }
1033 
1034 
1035 /**
1036  * vmw_du_crtc_destroy_state - destroy crtc state
1037  * @crtc: DRM crtc
1038  * @state: state object to destroy
1039  *
1040  * Destroys the crtc state (both common and vmw-specific) for the
1041  * specified crtc.
1042  */
1043 void
1044 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1045 			  struct drm_crtc_state *state)
1046 {
1047 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1048 }
1049 
1050 
1051 /**
1052  * vmw_du_plane_duplicate_state - duplicate plane state
1053  * @plane: drm plane
1054  *
1055  * Allocates and returns a copy of the plane state (both common and
1056  * vmw-specific) for the specified plane.
1057  *
1058  * Returns: The newly allocated plane state, or NULL on failure.
1059  */
1060 struct drm_plane_state *
1061 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1062 {
1063 	struct drm_plane_state *state;
1064 	struct vmw_plane_state *vps;
1065 
1066 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1067 
1068 	if (!vps)
1069 		return NULL;
1070 
1071 	vps->pinned = 0;
1072 	vps->cpp = 0;
1073 
1074 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1075 
1076 	/* Each ref counted resource needs to be acquired again */
1077 	if (vps->surf)
1078 		(void) vmw_surface_reference(vps->surf);
1079 
1080 	if (vps->bo)
1081 		(void) vmw_bo_reference(vps->bo);
1082 
1083 	state = &vps->base;
1084 
1085 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1086 
1087 	return state;
1088 }
1089 
1090 
1091 /**
1092  * vmw_du_plane_reset - creates a blank vmw plane state
1093  * @plane: drm plane
1094  *
1095  * Resets the atomic state for @plane by freeing the state pointer (which might
1096  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1097  */
1098 void vmw_du_plane_reset(struct drm_plane *plane)
1099 {
1100 	struct vmw_plane_state *vps;
1101 
1102 	if (plane->state)
1103 		vmw_du_plane_destroy_state(plane, plane->state);
1104 
1105 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1106 
1107 	if (!vps) {
1108 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1109 		return;
1110 	}
1111 
1112 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1113 }
1114 
1115 
1116 /**
1117  * vmw_du_plane_destroy_state - destroy plane state
1118  * @plane: DRM plane
1119  * @state: state object to destroy
1120  *
1121  * Destroys the plane state (both common and vmw-specific) for the
1122  * specified plane.
1123  */
1124 void
1125 vmw_du_plane_destroy_state(struct drm_plane *plane,
1126 			   struct drm_plane_state *state)
1127 {
1128 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1129 
1130 	/* Should have been freed by cleanup_fb */
1131 	if (vps->surf)
1132 		vmw_surface_unreference(&vps->surf);
1133 
1134 	if (vps->bo)
1135 		vmw_bo_unreference(&vps->bo);
1136 
1137 	drm_atomic_helper_plane_destroy_state(plane, state);
1138 }
1139 
1140 
1141 /**
1142  * vmw_du_connector_duplicate_state - duplicate connector state
1143  * @connector: DRM connector
1144  *
1145  * Allocates and returns a copy of the connector state (both common and
1146  * vmw-specific) for the specified connector.
1147  *
1148  * Returns: The newly allocated connector state, or NULL on failure.
1149  */
1150 struct drm_connector_state *
1151 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1152 {
1153 	struct drm_connector_state *state;
1154 	struct vmw_connector_state *vcs;
1155 
1156 	if (WARN_ON(!connector->state))
1157 		return NULL;
1158 
1159 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1160 
1161 	if (!vcs)
1162 		return NULL;
1163 
1164 	state = &vcs->base;
1165 
1166 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1167 
1168 	return state;
1169 }
1170 
1171 
1172 /**
1173  * vmw_du_connector_reset - creates a blank vmw connector state
1174  * @connector: DRM connector
1175  *
1176  * Resets the atomic state for @connector by freeing the state pointer (which
1177  * might be NULL, e.g. at driver load time) and allocating a new empty state
1178  * object.
1179  */
1180 void vmw_du_connector_reset(struct drm_connector *connector)
1181 {
1182 	struct vmw_connector_state *vcs;
1183 
1184 
1185 	if (connector->state) {
1186 		__drm_atomic_helper_connector_destroy_state(connector->state);
1187 
1188 		kfree(vmw_connector_state_to_vcs(connector->state));
1189 	}
1190 
1191 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1192 
1193 	if (!vcs) {
1194 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1195 		return;
1196 	}
1197 
1198 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1199 }
1200 
1201 
1202 /**
1203  * vmw_du_connector_destroy_state - destroy connector state
1204  * @connector: DRM connector
1205  * @state: state object to destroy
1206  *
1207  * Destroys the connector state (both common and vmw-specific) for the
1208  * specified connector.
1209  */
1210 void
1211 vmw_du_connector_destroy_state(struct drm_connector *connector,
1212 			  struct drm_connector_state *state)
1213 {
1214 	drm_atomic_helper_connector_destroy_state(connector, state);
1215 }
1216 /*
1217  * Generic framebuffer code
1218  */
1219 
1220 /*
1221  * Surface framebuffer code
1222  */
1223 
1224 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1225 {
1226 	struct vmw_framebuffer_surface *vfbs =
1227 		vmw_framebuffer_to_vfbs(framebuffer);
1228 
1229 	drm_framebuffer_cleanup(framebuffer);
1230 	vmw_surface_unreference(&vfbs->surface);
1231 
1232 	kfree(vfbs);
1233 }
1234 
1235 /**
1236  * vmw_kms_readback - Perform a readback from the screen system to
1237  * a buffer-object backed framebuffer.
1238  *
1239  * @dev_priv: Pointer to the device private structure.
1240  * @file_priv: Pointer to a struct drm_file identifying the caller.
1241  * Must be set to NULL if @user_fence_rep is NULL.
1242  * @vfb: Pointer to the buffer-object backed framebuffer.
1243  * @user_fence_rep: User-space provided structure for fence information.
1244  * Must be set to non-NULL if @file_priv is non-NULL.
1245  * @vclips: Array of clip rects.
1246  * @num_clips: Number of clip rects in @vclips.
1247  *
1248  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1249  * interrupted.
1250  */
1251 int vmw_kms_readback(struct vmw_private *dev_priv,
1252 		     struct drm_file *file_priv,
1253 		     struct vmw_framebuffer *vfb,
1254 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1255 		     struct drm_vmw_rect *vclips,
1256 		     uint32_t num_clips)
1257 {
1258 	switch (dev_priv->active_display_unit) {
1259 	case vmw_du_screen_object:
1260 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1261 					    user_fence_rep, vclips, num_clips,
1262 					    NULL);
1263 	case vmw_du_screen_target:
1264 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1265 					     user_fence_rep, NULL, vclips, num_clips,
1266 					     1, NULL);
1267 	default:
1268 		WARN_ONCE(true,
1269 			  "Readback called with invalid display system.\n");
1270 	}
1271 
1272 	return -ENOSYS;
1273 }
1274 
1275 
1276 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1277 	.destroy = vmw_framebuffer_surface_destroy,
1278 	.dirty = drm_atomic_helper_dirtyfb,
1279 };
1280 
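/**
 * vmw_kms_new_framebuffer_surface - Create a surface-backed framebuffer
 * @dev_priv: Pointer to device private struct.
 * @surface: The surface to wrap.
 * @out: On success points to the newly created vmw_framebuffer.
 * @mode_cmd: Frame-buffer metadata.
 * @is_bo_proxy: True if @surface is a proxy created for a buffer object.
 *
 * Validates the format and dimensions of @surface against @mode_cmd and
 * initializes the DRM framebuffer.
 *
 * Returns 0 on success, negative error code otherwise.
 */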
1281 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1282 					   struct vmw_surface *surface,
1283 					   struct vmw_framebuffer **out,
1284 					   const struct drm_mode_fb_cmd2
1285 					   *mode_cmd,
1286 					   bool is_bo_proxy)
1287 
1288 {
1289 	struct drm_device *dev = &dev_priv->drm;
1290 	struct vmw_framebuffer_surface *vfbs;
1291 	enum SVGA3dSurfaceFormat format;
1292 	int ret;
1293 
1294 	/* 3D is only supported on HWv8 and newer hosts */
1295 	if (dev_priv->active_display_unit == vmw_du_legacy)
1296 		return -ENOSYS;
1297 
1298 	/*
1299 	 * Sanity checks.
1300 	 */
1301 
1302 	if (!drm_any_plane_has_format(&dev_priv->drm,
1303 				      mode_cmd->pixel_format,
1304 				      mode_cmd->modifier[0])) {
1305 		drm_dbg(&dev_priv->drm,
1306 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1307 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1308 		return -EINVAL;
1309 	}
1310 
1311 	/* Surface must be marked as a scanout. */
1312 	if (unlikely(!surface->metadata.scanout))
1313 		return -EINVAL;
1314 
1315 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1316 		     surface->metadata.num_sizes != 1 ||
1317 		     surface->metadata.base_size.width < mode_cmd->width ||
1318 		     surface->metadata.base_size.height < mode_cmd->height ||
1319 		     surface->metadata.base_size.depth != 1)) {
1320 		DRM_ERROR("Incompatible surface dimensions "
1321 			  "for requested mode.\n");
1322 		return -EINVAL;
1323 	}
1324 
1325 	switch (mode_cmd->pixel_format) {
1326 	case DRM_FORMAT_ARGB8888:
1327 		format = SVGA3D_A8R8G8B8;
1328 		break;
1329 	case DRM_FORMAT_XRGB8888:
1330 		format = SVGA3D_X8R8G8B8;
1331 		break;
1332 	case DRM_FORMAT_RGB565:
1333 		format = SVGA3D_R5G6B5;
1334 		break;
1335 	case DRM_FORMAT_XRGB1555:
1336 		format = SVGA3D_A1R5G5B5;
1337 		break;
1338 	default:
1339 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1340 			  &mode_cmd->pixel_format);
1341 		return -EINVAL;
1342 	}
1343 
1344 	/*
1345 	 * For DX, surface format validation is done when surface->scanout
1346 	 * is set.
1347 	 */
1348 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1349 		DRM_ERROR("Invalid surface format for requested mode.\n");
1350 		return -EINVAL;
1351 	}
1352 
1353 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1354 	if (!vfbs) {
1355 		ret = -ENOMEM;
1356 		goto out_err1;
1357 	}
1358 
1359 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1360 	vfbs->surface = vmw_surface_reference(surface);
1361 	vfbs->is_bo_proxy = is_bo_proxy;
1362 
1363 	*out = &vfbs->base;
1364 
1365 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1366 				   &vmw_framebuffer_surface_funcs);
1367 	if (ret)
1368 		goto out_err2;
1369 
1370 	return 0;
1371 
1372 out_err2:
1373 	vmw_surface_unreference(&surface);
1374 	kfree(vfbs);
1375 out_err1:
1376 	return ret;
1377 }
1378 
1379 /*
1380  * Buffer-object framebuffer code
1381  */
1382 
1383 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1384 					    struct drm_file *file_priv,
1385 					    unsigned int *handle)
1386 {
1387 	struct vmw_framebuffer_bo *vfbd =
1388 			vmw_framebuffer_to_vfbd(fb);
1389 
1390 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1391 }
1392 
1393 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1394 {
1395 	struct vmw_framebuffer_bo *vfbd =
1396 		vmw_framebuffer_to_vfbd(framebuffer);
1397 
1398 	drm_framebuffer_cleanup(framebuffer);
1399 	vmw_bo_unreference(&vfbd->buffer);
1400 
1401 	kfree(vfbd);
1402 }
1403 
1404 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1405 	.create_handle = vmw_framebuffer_bo_create_handle,
1406 	.destroy = vmw_framebuffer_bo_destroy,
1407 	.dirty = drm_atomic_helper_dirtyfb,
1408 };
1409 
1410 /**
1411  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1412  *
1413  * @dev: DRM device
1414  * @mode_cmd: parameters for the new surface
1415  * @bo_mob: MOB backing the buffer object
1416  * @srf_out: newly created surface
1417  *
1418  * When the content FB is a buffer object, we create a surface as a proxy to the
1419  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1420  * This is a more efficient approach.
1421  *
1422  * RETURNS:
1423  * 0 on success, error code otherwise
1424  */
1425 static int vmw_create_bo_proxy(struct drm_device *dev,
1426 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1427 			       struct vmw_bo *bo_mob,
1428 			       struct vmw_surface **srf_out)
1429 {
1430 	struct vmw_surface_metadata metadata = {0};
1431 	uint32_t format;
1432 	struct vmw_resource *res;
1433 	unsigned int bytes_pp;
1434 	int ret;
1435 
1436 	switch (mode_cmd->pixel_format) {
1437 	case DRM_FORMAT_ARGB8888:
1438 	case DRM_FORMAT_XRGB8888:
1439 		format = SVGA3D_X8R8G8B8;
1440 		bytes_pp = 4;
1441 		break;
1442 
1443 	case DRM_FORMAT_RGB565:
1444 	case DRM_FORMAT_XRGB1555:
1445 		format = SVGA3D_R5G6B5;
1446 		bytes_pp = 2;
1447 		break;
1448 
1449 	case 8:
1450 		format = SVGA3D_P8;
1451 		bytes_pp = 1;
1452 		break;
1453 
1454 	default:
1455 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1456 			  &mode_cmd->pixel_format);
1457 		return -EINVAL;
1458 	}
1459 
1460 	metadata.format = format;
1461 	metadata.mip_levels[0] = 1;
1462 	metadata.num_sizes = 1;
1463 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1464 	metadata.base_size.height =  mode_cmd->height;
1465 	metadata.base_size.depth = 1;
1466 	metadata.scanout = true;
1467 
1468 	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1469 	if (ret) {
1470 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1471 		return ret;
1472 	}
1473 
1474 	res = &(*srf_out)->res;
1475 
1476 	/* Reserve and switch the backing mob. */
1477 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1478 	(void) vmw_resource_reserve(res, false, true);
1479 	vmw_user_bo_unref(&res->guest_memory_bo);
1480 	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
1481 	res->guest_memory_offset = 0;
1482 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1483 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1484 
1485 	return 0;
1486 }
1487 
1488 
1489 
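/**
 * vmw_kms_new_framebuffer_bo - Create a buffer-object-backed framebuffer
 * @dev_priv: Pointer to device private struct.
 * @bo: The buffer object to wrap.
 * @out: On success points to the newly created vmw_framebuffer.
 * @mode_cmd: Frame-buffer metadata.
 *
 * Checks that @bo is large enough for the requested mode and that the pixel
 * format is supported, then initializes the DRM framebuffer.
 *
 * Returns 0 on success, negative error code otherwise.
 */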
1490 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1491 				      struct vmw_bo *bo,
1492 				      struct vmw_framebuffer **out,
1493 				      const struct drm_mode_fb_cmd2
1494 				      *mode_cmd)
1495 
1496 {
1497 	struct drm_device *dev = &dev_priv->drm;
1498 	struct vmw_framebuffer_bo *vfbd;
1499 	unsigned int requested_size;
1500 	int ret;
1501 
1502 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1503 	if (unlikely(requested_size > bo->tbo.base.size)) {
1504 		DRM_ERROR("Screen buffer object size is too small "
1505 			  "for requested mode.\n");
1506 		return -EINVAL;
1507 	}
1508 
1509 	if (!drm_any_plane_has_format(&dev_priv->drm,
1510 				      mode_cmd->pixel_format,
1511 				      mode_cmd->modifier[0])) {
1512 		drm_dbg(&dev_priv->drm,
1513 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1514 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1515 		return -EINVAL;
1516 	}
1517 
1518 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1519 	if (!vfbd) {
1520 		ret = -ENOMEM;
1521 		goto out_err1;
1522 	}
1523 
1524 	vfbd->base.base.obj[0] = &bo->tbo.base;
1525 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1526 	vfbd->base.bo = true;
1527 	vfbd->buffer = vmw_bo_reference(bo);
1528 	*out = &vfbd->base;
1529 
1530 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1531 				   &vmw_framebuffer_bo_funcs);
1532 	if (ret)
1533 		goto out_err2;
1534 
1535 	return 0;
1536 
1537 out_err2:
1538 	vmw_bo_unreference(&bo);
1539 	kfree(vfbd);
1540 out_err1:
1541 	return ret;
1542 }
1543 
1544 
1545 /**
1546  * vmw_kms_srf_ok - check if a surface can be created
1547  *
1548  * @dev_priv: Pointer to device private struct.
1549  * @width: requested width
1550  * @height: requested height
1551  *
1552  * Surfaces must not exceed the maximum texture size
1553  */
1554 static bool
1555 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1556 {
1557 	if (width  > dev_priv->texture_max_width ||
1558 	    height > dev_priv->texture_max_height)
1559 		return false;
1560 
1561 	return true;
1562 }
1563 
1564 /**
1565  * vmw_kms_new_framebuffer - Create a new framebuffer.
1566  *
1567  * @dev_priv: Pointer to device private struct.
1568  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1569  * Either @bo or @surface must be NULL.
1570  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1571  * Either @bo or @surface must be NULL.
1572  * @only_2d: No presents will occur to this buffer object based framebuffer.
1573  * This helps the code to do some important optimizations.
1574  * @mode_cmd: Frame-buffer metadata.
1575  */
1576 struct vmw_framebuffer *
1577 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1578 			struct vmw_bo *bo,
1579 			struct vmw_surface *surface,
1580 			bool only_2d,
1581 			const struct drm_mode_fb_cmd2 *mode_cmd)
1582 {
1583 	struct vmw_framebuffer *vfb = NULL;
1584 	bool is_bo_proxy = false;
1585 	int ret;
1586 
1587 	/*
1588 	 * We cannot use the SurfaceDMA command in a non-accelerated VM;
1589 	 * therefore, wrap the buffer object in a surface so we can use the
1590 	 * SurfaceCopy command.
1591 	 */
1592 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1593 	    bo && only_2d &&
1594 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1595 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1596 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1597 					  bo, &surface);
1598 		if (ret)
1599 			return ERR_PTR(ret);
1600 
1601 		is_bo_proxy = true;
1602 	}
1603 
1604 	/* Create the new framebuffer depending on what we have */
1605 	if (surface) {
1606 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1607 						      mode_cmd,
1608 						      is_bo_proxy);
1609 		/*
1610 		 * vmw_create_bo_proxy() adds a reference that is no longer
1611 		 * needed
1612 		 */
1613 		if (is_bo_proxy)
1614 			vmw_surface_unreference(&surface);
1615 	} else if (bo) {
1616 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1617 						 mode_cmd);
1618 	} else {
1619 		BUG();
1620 	}
1621 
1622 	if (ret)
1623 		return ERR_PTR(ret);
1624 
1625 	return vfb;
1626 }
1627 
1628 /*
1629  * Generic Kernel modesetting functions
1630  */
1631 
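/**
 * vmw_kms_fb_create - Implementation of &drm_mode_config_funcs.fb_create
 * @dev: DRM device
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @mode_cmd: Frame-buffer metadata.
 *
 * Looks up the surface or buffer object behind the supplied handle and wraps
 * it in a new vmw_framebuffer.
 */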
1632 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1633 						 struct drm_file *file_priv,
1634 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1635 {
1636 	struct vmw_private *dev_priv = vmw_priv(dev);
1637 	struct vmw_framebuffer *vfb = NULL;
1638 	struct vmw_surface *surface = NULL;
1639 	struct vmw_bo *bo = NULL;
1640 	int ret;
1641 
1642 	/* returns either a bo or surface */
1643 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1644 				     mode_cmd->handles[0],
1645 				     &surface, &bo);
1646 	if (ret) {
1647 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1648 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1649 		goto err_out;
1650 	}
1651 
1652 
1653 	if (!bo &&
1654 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1655 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1656 			dev_priv->texture_max_width,
1657 			dev_priv->texture_max_height);
1658 		goto err_out;
1659 	}
1660 
1661 
1662 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1663 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1664 				      mode_cmd);
1665 	if (IS_ERR(vfb)) {
1666 		ret = PTR_ERR(vfb);
1667 		goto err_out;
1668 	}
1669 
1670 err_out:
1671 	/* vmw_user_lookup_handle takes one ref, and so does the new framebuffer */
1672 	if (bo)
1673 		vmw_user_bo_unref(&bo);
1674 	if (surface)
1675 		vmw_surface_unreference(&surface);
1676 
1677 	if (ret) {
1678 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1679 		return ERR_PTR(ret);
1680 	}
1681 
1682 	return &vfb->base;
1683 }
1684 
1685 /**
1686  * vmw_kms_check_display_memory - Validates display memory required for a
1687  * topology
1688  * @dev: DRM device
1689  * @num_rects: number of drm_rect in rects
1690  * @rects: array of drm_rect representing the topology to validate indexed by
1691  * crtc index.
1692  *
1693  * Returns:
1694  * 0 on success otherwise negative error code
1695  */
1696 static int vmw_kms_check_display_memory(struct drm_device *dev,
1697 					uint32_t num_rects,
1698 					struct drm_rect *rects)
1699 {
1700 	struct vmw_private *dev_priv = vmw_priv(dev);
1701 	struct drm_rect bounding_box = {0};
1702 	u64 total_pixels = 0, pixel_mem, bb_mem;
1703 	int i;
1704 
1705 	for (i = 0; i < num_rects; i++) {
1706 		/*
1707 		 * For STDU only individual screen (screen target) is limited by
1708 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1709 		 */
1710 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1711 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1712 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1713 			VMW_DEBUG_KMS("Screen size not supported.\n");
1714 			return -EINVAL;
1715 		}
1716 
1717 		/* Bounding box upper left is at (0,0). */
1718 		if (rects[i].x2 > bounding_box.x2)
1719 			bounding_box.x2 = rects[i].x2;
1720 
1721 		if (rects[i].y2 > bounding_box.y2)
1722 			bounding_box.y2 = rects[i].y2;
1723 
1724 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1725 			(u64) drm_rect_height(&rects[i]);
1726 	}
1727 
1728 	/* Virtual svga device primary limits are always in 32-bpp. */
1729 	pixel_mem = total_pixels * 4;
1730 
1731 	/*
1732 	 * For HV10 and below, prim_bb_mem is the vram size. When
1733 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
1734 	 * is the limit on the primary bounding box.
1735 	 */
1736 	if (pixel_mem > dev_priv->max_primary_mem) {
1737 		VMW_DEBUG_KMS("Combined output size too large.\n");
1738 		return -EINVAL;
1739 	}
1740 
1741 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1742 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1743 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1744 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1745 
1746 		if (bb_mem > dev_priv->max_primary_mem) {
1747 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1748 			return -EINVAL;
1749 		}
1750 	}
1751 
1752 	return 0;
1753 }
1754 
1755 /**
1756  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1757  * crtc mutex
1758  * @state: The atomic state pointer containing the new atomic state
1759  * @crtc: The crtc
1760  *
1761  * This function returns the new crtc state if it's part of the state update.
1762  * Otherwise returns the current crtc state. It also makes sure that the
1763  * crtc mutex is locked.
1764  *
1765  * Returns: A valid crtc state pointer or NULL. It may also return a
1766  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1767  */
1768 static struct drm_crtc_state *
1769 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1770 {
1771 	struct drm_crtc_state *crtc_state;
1772 
1773 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1774 	if (crtc_state) {
1775 		lockdep_assert_held(&crtc->mutex.mutex.base);
1776 	} else {
1777 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1778 
1779 		if (ret != 0 && ret != -EALREADY)
1780 			return ERR_PTR(ret);
1781 
1782 		crtc_state = crtc->state;
1783 	}
1784 
1785 	return crtc_state;
1786 }
1787 
1788 /**
1789  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1790  * from the same fb after the new state is committed.
1791  * @dev: The drm_device.
1792  * @state: The new state to be checked.
1793  *
1794  * Returns:
1795  *   Zero on success,
1796  *   -EINVAL on invalid state,
1797  *   -EDEADLK if modeset locking needs to be rerun.
1798  */
1799 static int vmw_kms_check_implicit(struct drm_device *dev,
1800 				  struct drm_atomic_state *state)
1801 {
1802 	struct drm_framebuffer *implicit_fb = NULL;
1803 	struct drm_crtc *crtc;
1804 	struct drm_crtc_state *crtc_state;
1805 	struct drm_plane_state *plane_state;
1806 
1807 	drm_for_each_crtc(crtc, dev) {
1808 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1809 
1810 		if (!du->is_implicit)
1811 			continue;
1812 
1813 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1814 		if (IS_ERR(crtc_state))
1815 			return PTR_ERR(crtc_state);
1816 
1817 		if (!crtc_state || !crtc_state->enable)
1818 			continue;
1819 
1820 		/*
1821 		 * Can't move primary planes across crtcs, so this is OK.
1822 		 * It also means we don't need to take the plane mutex.
1823 		 */
1824 		plane_state = du->primary.state;
1825 		if (plane_state->crtc != crtc)
1826 			continue;
1827 
1828 		if (!implicit_fb)
1829 			implicit_fb = plane_state->fb;
1830 		else if (implicit_fb != plane_state->fb)
1831 			return -EINVAL;
1832 	}
1833 
1834 	return 0;
1835 }
1836 
1837 /**
1838  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1839  * @dev: DRM device
1840  * @state: the driver state object
1841  *
1842  * Returns:
1843  * 0 on success otherwise negative error code
1844  */
1845 static int vmw_kms_check_topology(struct drm_device *dev,
1846 				  struct drm_atomic_state *state)
1847 {
1848 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1849 	struct drm_rect *rects;
1850 	struct drm_crtc *crtc;
1851 	uint32_t i;
1852 	int ret = 0;
1853 
1854 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1855 			GFP_KERNEL);
1856 	if (!rects)
1857 		return -ENOMEM;
1858 
1859 	drm_for_each_crtc(crtc, dev) {
1860 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1861 		struct drm_crtc_state *crtc_state;
1862 
1863 		i = drm_crtc_index(crtc);
1864 
1865 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1866 		if (IS_ERR(crtc_state)) {
1867 			ret = PTR_ERR(crtc_state);
1868 			goto clean;
1869 		}
1870 
1871 		if (!crtc_state)
1872 			continue;
1873 
1874 		if (crtc_state->enable) {
1875 			rects[i].x1 = du->gui_x;
1876 			rects[i].y1 = du->gui_y;
1877 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1878 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1879 		} else {
1880 			rects[i].x1 = 0;
1881 			rects[i].y1 = 0;
1882 			rects[i].x2 = 0;
1883 			rects[i].y2 = 0;
1884 		}
1885 	}
1886 
1887 	/* Determine change to topology due to new atomic state */
1888 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1889 				      new_crtc_state, i) {
1890 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1891 		struct drm_connector *connector;
1892 		struct drm_connector_state *conn_state;
1893 		struct vmw_connector_state *vmw_conn_state;
1894 
1895 		if (!du->pref_active && new_crtc_state->enable) {
1896 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1897 			ret = -EINVAL;
1898 			goto clean;
1899 		}
1900 
1901 		/*
1902 		 * For vmwgfx each crtc has only one connector attached and it
1903 		 * is not changed, so we don't really need to check
1904 		 * crtc->connector_mask and iterate over it.
1905 		 */
1906 		connector = &du->connector;
1907 		conn_state = drm_atomic_get_connector_state(state, connector);
1908 		if (IS_ERR(conn_state)) {
1909 			ret = PTR_ERR(conn_state);
1910 			goto clean;
1911 		}
1912 
1913 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1914 		vmw_conn_state->gui_x = du->gui_x;
1915 		vmw_conn_state->gui_y = du->gui_y;
1916 	}
1917 
1918 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1919 					   rects);
1920 
1921 clean:
1922 	kfree(rects);
1923 	return ret;
1924 }
1925 
1926 /**
1927  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1928  *
1929  * @dev: DRM device
1930  * @state: the driver state object
1931  *
1932  * This runs the core drm_atomic_helper_check() and the vmwgfx-specific
1933  * implicit placement and display topology checks. The topology check is
1934  * only done when one of the crtcs in @state needs a modeset.
1935  *
1936  * Returns:
1937  * Zero for success or -errno
1938  */
1939 static int
1940 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1941 			     struct drm_atomic_state *state)
1942 {
1943 	struct drm_crtc *crtc;
1944 	struct drm_crtc_state *crtc_state;
1945 	bool need_modeset = false;
1946 	int i, ret;
1947 
1948 	ret = drm_atomic_helper_check(dev, state);
1949 	if (ret)
1950 		return ret;
1951 
1952 	ret = vmw_kms_check_implicit(dev, state);
1953 	if (ret) {
1954 		VMW_DEBUG_KMS("Invalid implicit state\n");
1955 		return ret;
1956 	}
1957 
1958 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1959 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1960 			need_modeset = true;
1961 	}
1962 
1963 	if (need_modeset)
1964 		return vmw_kms_check_topology(dev, state);
1965 
1966 	return ret;
1967 }
1968 
1969 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1970 	.fb_create = vmw_kms_fb_create,
1971 	.atomic_check = vmw_kms_atomic_check_modeset,
1972 	.atomic_commit = drm_atomic_helper_commit,
1973 };
1974 
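/**
 * vmw_kms_generic_present - present a surface using the Screen Object display
 * @dev_priv: the private driver struct
 * @file_priv: drm file identifying the caller (currently unused)
 * @vfb: framebuffer to present to
 * @surface: surface holding the contents to present
 * @sid: surface id (currently unused, the resource is taken from @surface)
 * @destX: destination X offset
 * @destY: destination Y offset
 * @clips: array of clip rects in framebuffer coordinates
 * @num_clips: number of clip rects in @clips
 *
 * Thin wrapper around vmw_kms_sou_do_surface_dirty() used by
 * vmw_kms_present() for the Screen Object display unit.
 */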
1975 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1976 				   struct drm_file *file_priv,
1977 				   struct vmw_framebuffer *vfb,
1978 				   struct vmw_surface *surface,
1979 				   uint32_t sid,
1980 				   int32_t destX, int32_t destY,
1981 				   struct drm_vmw_rect *clips,
1982 				   uint32_t num_clips)
1983 {
1984 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1985 					    &surface->res, destX, destY,
1986 					    num_clips, 1, NULL, NULL);
1987 }
1988 
1989 
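/**
 * vmw_kms_present - present a surface to the display
 * @dev_priv: the private driver struct
 * @file_priv: drm file identifying the caller
 * @vfb: framebuffer backing the display
 * @surface: surface holding the contents to present
 * @sid: surface id
 * @destX: destination X offset
 * @destY: destination Y offset
 * @clips: array of clip rects in framebuffer coordinates
 * @num_clips: number of clip rects in @clips
 *
 * Dispatches the present to the active display unit implementation
 * (screen target or screen object) and flushes the command buffer.
 *
 * Returns: Zero on success, negative error code otherwise.
 */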
1990 int vmw_kms_present(struct vmw_private *dev_priv,
1991 		    struct drm_file *file_priv,
1992 		    struct vmw_framebuffer *vfb,
1993 		    struct vmw_surface *surface,
1994 		    uint32_t sid,
1995 		    int32_t destX, int32_t destY,
1996 		    struct drm_vmw_rect *clips,
1997 		    uint32_t num_clips)
1998 {
1999 	int ret;
2000 
2001 	switch (dev_priv->active_display_unit) {
2002 	case vmw_du_screen_target:
2003 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2004 						 &surface->res, destX, destY,
2005 						 num_clips, 1, NULL, NULL);
2006 		break;
2007 	case vmw_du_screen_object:
2008 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2009 					      sid, destX, destY, clips,
2010 					      num_clips);
2011 		break;
2012 	default:
2013 		WARN_ONCE(true,
2014 			  "Present called with invalid display system.\n");
2015 		ret = -ENOSYS;
2016 		break;
2017 	}
2018 	if (ret)
2019 		return ret;
2020 
2021 	vmw_cmd_flush(dev_priv, false);
2022 
2023 	return 0;
2024 }
2025 
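/**
 * vmw_kms_create_hotplug_mode_update_property - Set up the hotplug mode
 * update property.
 * @dev_priv: Pointer to a device private struct.
 *
 * Creates the immutable "hotplug_mode_update" range property unless it has
 * already been created.
 */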
2026 static void
2027 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2028 {
2029 	if (dev_priv->hotplug_mode_update_property)
2030 		return;
2031 
2032 	dev_priv->hotplug_mode_update_property =
2033 		drm_property_create_range(&dev_priv->drm,
2034 					  DRM_MODE_PROP_IMMUTABLE,
2035 					  "hotplug_mode_update", 0, 1);
2036 }
2037 
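/**
 * vmw_atomic_commit_tail - Custom atomic commit tail
 * @old_state: the atomic state being committed
 *
 * Runs the default drm_atomic_helper_commit_tail() and, when vkms is
 * enabled, flushes any pending CRC generator work for the committed crtcs.
 */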
2038 static void
2039 vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
2040 {
2041 	struct vmw_private *vmw = vmw_priv(old_state->dev);
2042 	struct drm_crtc *crtc;
2043 	struct drm_crtc_state *old_crtc_state;
2044 	int i;
2045 
2046 	drm_atomic_helper_commit_tail(old_state);
2047 
2048 	if (vmw->vkms_enabled) {
2049 		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2050 			struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2051 			(void)old_crtc_state;
2052 			flush_work(&du->vkms.crc_generator_work);
2053 		}
2054 	}
2055 }
2056 
2057 static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
2058 	.atomic_commit_tail = vmw_atomic_commit_tail,
2059 };
2060 
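/**
 * vmw_kms_init - Initialize kernel modesetting
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the drm mode config limits, helpers and properties, then
 * initializes the first supported display unit type, trying screen target,
 * then screen object, then legacy.
 *
 * Returns: Zero on success, negative error code otherwise.
 */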
2061 int vmw_kms_init(struct vmw_private *dev_priv)
2062 {
2063 	struct drm_device *dev = &dev_priv->drm;
2064 	int ret;
2065 	static const char *display_unit_names[] = {
2066 		"Invalid",
2067 		"Legacy",
2068 		"Screen Object",
2069 		"Screen Target",
2070 		"Invalid (max)"
2071 	};
2072 
2073 	drm_mode_config_init(dev);
2074 	dev->mode_config.funcs = &vmw_kms_funcs;
2075 	dev->mode_config.min_width = 1;
2076 	dev->mode_config.min_height = 1;
2077 	dev->mode_config.max_width = dev_priv->texture_max_width;
2078 	dev->mode_config.max_height = dev_priv->texture_max_height;
2079 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2080 	dev->mode_config.helper_private = &vmw_mode_config_helpers;
2081 
2082 	drm_mode_create_suggested_offset_properties(dev);
2083 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2084 
2085 	ret = vmw_kms_stdu_init_display(dev_priv);
2086 	if (ret) {
2087 		ret = vmw_kms_sou_init_display(dev_priv);
2088 		if (ret) /* Fallback */
2089 			ret = vmw_kms_ldu_init_display(dev_priv);
2090 	}
2091 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2092 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2093 		 display_unit_names[dev_priv->active_display_unit]);
2094 
2095 	return ret;
2096 }
2097 
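/**
 * vmw_kms_close - Tear down kernel modesetting
 * @dev_priv: Pointer to a device private struct.
 *
 * Cleans up the drm mode config and, for the legacy display unit, closes
 * its display.
 *
 * Returns: Zero on success, negative error code otherwise.
 */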
2098 int vmw_kms_close(struct vmw_private *dev_priv)
2099 {
2100 	int ret = 0;
2101 
2102 	/*
2103 	 * Docs say we should take the lock before calling this function,
2104 	 * but since it destroys encoders and our destructor calls
2105 	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2106 	 */
2107 	drm_mode_config_cleanup(&dev_priv->drm);
2108 	if (dev_priv->active_display_unit == vmw_du_legacy)
2109 		ret = vmw_kms_ldu_close_display(dev_priv);
2110 
2111 	return ret;
2112 }
2113 
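/**
 * vmw_kms_cursor_bypass_ioctl - Handler for DRM_VMW_CURSOR_BYPASS ioctl
 * @dev: drm device for the ioctl
 * @data: pointer to a struct drm_vmw_cursor_bypass_arg
 * @file_priv: drm file for the ioctl call
 *
 * Updates the cursor hotspot of either all display units or only the crtc
 * identified by the argument.
 *
 * Returns: Zero on success, -ENOENT if the specified crtc could not be found.
 */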
2114 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2115 				struct drm_file *file_priv)
2116 {
2117 	struct drm_vmw_cursor_bypass_arg *arg = data;
2118 	struct vmw_display_unit *du;
2119 	struct drm_crtc *crtc;
2120 	int ret = 0;
2121 
2122 	mutex_lock(&dev->mode_config.mutex);
2123 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2124 
2125 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2126 			du = vmw_crtc_to_du(crtc);
2127 			du->hotspot_x = arg->xhot;
2128 			du->hotspot_y = arg->yhot;
2129 		}
2130 
2131 		mutex_unlock(&dev->mode_config.mutex);
2132 		return 0;
2133 	}
2134 
2135 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2136 	if (!crtc) {
2137 		ret = -ENOENT;
2138 		goto out;
2139 	}
2140 
2141 	du = vmw_crtc_to_du(crtc);
2142 
2143 	du->hotspot_x = arg->xhot;
2144 	du->hotspot_y = arg->yhot;
2145 
2146 out:
2147 	mutex_unlock(&dev->mode_config.mutex);
2148 
2149 	return ret;
2150 }
2151 
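/**
 * vmw_kms_write_svga - Program the device registers for a display mode
 * @vmw_priv: Pointer to a device private struct.
 * @width: width of the mode
 * @height: height of the mode
 * @pitch: pitch in bytes of a scanline
 * @bpp: bits per pixel
 * @depth: color depth
 *
 * Returns: Zero on success, -EINVAL if the depth reported by the host does
 * not match the requested one.
 */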
2152 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2153 			unsigned width, unsigned height, unsigned pitch,
2154 			unsigned bpp, unsigned depth)
2155 {
2156 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2157 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2158 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2159 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2160 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2161 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2162 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2163 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2164 
2165 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2166 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2167 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2168 		return -EINVAL;
2169 	}
2170 
2171 	return 0;
2172 }
2173 
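/**
 * vmw_kms_validate_mode_vram - Check that a mode fits in display memory
 * @dev_priv: Pointer to a device private struct.
 * @pitch: pitch in bytes of a scanline
 * @height: height of the mode
 *
 * Returns: true if pitch * height is smaller than the primary memory limit
 * for screen targets, or than VRAM for the other display unit types.
 */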
2174 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2175 				uint32_t pitch,
2176 				uint32_t height)
2177 {
2178 	return ((u64) pitch * (u64) height) < (u64)
2179 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2180 		 dev_priv->max_primary_mem : dev_priv->vram_size);
2181 }
2182 
2183 /**
2184  * vmw_du_update_layout - Update the display units with the topology from the
2185  * resolution plugin and generate a DRM uevent
2186  * @dev_priv: device private
2187  * @num_rects: number of drm_rect in @rects
2188  * @rects: topology to update
2189  */
2190 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2191 				unsigned int num_rects, struct drm_rect *rects)
2192 {
2193 	struct drm_device *dev = &dev_priv->drm;
2194 	struct vmw_display_unit *du;
2195 	struct drm_connector *con;
2196 	struct drm_connector_list_iter conn_iter;
2197 	struct drm_modeset_acquire_ctx ctx;
2198 	struct drm_crtc *crtc;
2199 	int ret;
2200 
2201 	/* Currently gui_x/y is protected with the crtc mutex */
2202 	mutex_lock(&dev->mode_config.mutex);
2203 	drm_modeset_acquire_init(&ctx, 0);
2204 retry:
2205 	drm_for_each_crtc(crtc, dev) {
2206 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2207 		if (ret < 0) {
2208 			if (ret == -EDEADLK) {
2209 				drm_modeset_backoff(&ctx);
2210 				goto retry;
2211 			}
2212 			goto out_fini;
2213 		}
2214 	}
2215 
2216 	drm_connector_list_iter_begin(dev, &conn_iter);
2217 	drm_for_each_connector_iter(con, &conn_iter) {
2218 		du = vmw_connector_to_du(con);
2219 		if (num_rects > du->unit) {
2220 			du->pref_width = drm_rect_width(&rects[du->unit]);
2221 			du->pref_height = drm_rect_height(&rects[du->unit]);
2222 			du->pref_active = true;
2223 			du->gui_x = rects[du->unit].x1;
2224 			du->gui_y = rects[du->unit].y1;
2225 		} else {
2226 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2227 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2228 			du->pref_active = false;
2229 			du->gui_x = 0;
2230 			du->gui_y = 0;
2231 		}
2232 	}
2233 	drm_connector_list_iter_end(&conn_iter);
2234 
2235 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2236 		du = vmw_connector_to_du(con);
2237 		if (num_rects > du->unit) {
2238 			drm_object_property_set_value
2239 			  (&con->base, dev->mode_config.suggested_x_property,
2240 			   du->gui_x);
2241 			drm_object_property_set_value
2242 			  (&con->base, dev->mode_config.suggested_y_property,
2243 			   du->gui_y);
2244 		} else {
2245 			drm_object_property_set_value
2246 			  (&con->base, dev->mode_config.suggested_x_property,
2247 			   0);
2248 			drm_object_property_set_value
2249 			  (&con->base, dev->mode_config.suggested_y_property,
2250 			   0);
2251 		}
2252 		con->status = vmw_du_connector_detect(con, true);
2253 	}
2254 out_fini:
2255 	drm_modeset_drop_locks(&ctx);
2256 	drm_modeset_acquire_fini(&ctx);
2257 	mutex_unlock(&dev->mode_config.mutex);
2258 
2259 	drm_sysfs_hotplug_event(dev);
2260 
2261 	return 0;
2262 }
2263 
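/**
 * vmw_du_crtc_gamma_set - Set the legacy gamma/palette registers
 * @crtc: the crtc to set the palette for
 * @r: red component values
 * @g: green component values
 * @b: blue component values
 * @size: number of entries in @r, @g and @b
 * @ctx: modeset acquire context (unused)
 *
 * Writes the upper 8 bits of each 16-bit component to the device palette
 * registers.
 *
 * Returns: Zero.
 */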
2264 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2265 			  u16 *r, u16 *g, u16 *b,
2266 			  uint32_t size,
2267 			  struct drm_modeset_acquire_ctx *ctx)
2268 {
2269 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2270 	int i;
2271 
2272 	for (i = 0; i < size; i++) {
2273 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2274 			  r[i], g[i], b[i]);
2275 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2276 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2277 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2278 	}
2279 
2280 	return 0;
2281 }
2282 
2283 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2284 {
2285 	return 0;
2286 }
2287 
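/**
 * vmw_du_connector_detect - Implements drm_connector_funcs.detect
 * @connector: the drm connector
 * @force: whether a full detection should be forced (unused)
 *
 * Returns: connector_status_connected if the unit is below the number of
 * displays reported by the device and is preferred-active, otherwise
 * connector_status_disconnected.
 */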
2288 enum drm_connector_status
2289 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2290 {
2291 	uint32_t num_displays;
2292 	struct drm_device *dev = connector->dev;
2293 	struct vmw_private *dev_priv = vmw_priv(dev);
2294 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2295 
2296 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2297 
2298 	return ((du->unit < num_displays &&
2299 		 du->pref_active) ?
2300 		connector_status_connected : connector_status_disconnected);
2301 }
2302 
2303 /**
2304  * vmw_guess_mode_timing - Provide fake timings for a
2305  * 60Hz vrefresh mode.
2306  *
2307  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2308  * members filled in.
2309  */
2310 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2311 {
2312 	mode->hsync_start = mode->hdisplay + 50;
2313 	mode->hsync_end = mode->hsync_start + 50;
2314 	mode->htotal = mode->hsync_end + 50;
2315 
2316 	mode->vsync_start = mode->vdisplay + 50;
2317 	mode->vsync_end = mode->vsync_start + 50;
2318 	mode->vtotal = mode->vsync_end + 50;
2319 
2320 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2321 }
2322 
2323 
2324 /**
2325  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2326  * @dev: drm device for the ioctl
2327  * @data: data pointer for the ioctl
2328  * @file_priv: drm file for the ioctl call
2329  *
2330  * Update preferred topology of display unit as per ioctl request. The topology
2331  * is expressed as array of drm_vmw_rect.
2332  * e.g.
2333  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2334  *
2335  * NOTE:
2336  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0. Besides
2337  * the device limit on topology, x + w and y + h (lower right) cannot be greater
2338  * than INT_MAX. A topology beyond these limits will return an error.
2339  *
2340  * Returns:
2341  * Zero on success, negative errno on failure.
2342  */
2343 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2344 				struct drm_file *file_priv)
2345 {
2346 	struct vmw_private *dev_priv = vmw_priv(dev);
2347 	struct drm_mode_config *mode_config = &dev->mode_config;
2348 	struct drm_vmw_update_layout_arg *arg =
2349 		(struct drm_vmw_update_layout_arg *)data;
2350 	void __user *user_rects;
2351 	struct drm_vmw_rect *rects;
2352 	struct drm_rect *drm_rects;
2353 	unsigned rects_size;
2354 	int ret, i;
2355 
2356 	if (!arg->num_outputs) {
2357 		struct drm_rect def_rect = {0, 0,
2358 					    VMWGFX_MIN_INITIAL_WIDTH,
2359 					    VMWGFX_MIN_INITIAL_HEIGHT};
2360 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2361 		return 0;
2362 	}
2363 
2364 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2365 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2366 			GFP_KERNEL);
2367 	if (unlikely(!rects))
2368 		return -ENOMEM;
2369 
2370 	user_rects = (void __user *)(unsigned long)arg->rects;
2371 	ret = copy_from_user(rects, user_rects, rects_size);
2372 	if (unlikely(ret != 0)) {
2373 		DRM_ERROR("Failed to get rects.\n");
2374 		ret = -EFAULT;
2375 		goto out_free;
2376 	}
2377 
2378 	drm_rects = (struct drm_rect *)rects;
2379 
2380 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2381 	for (i = 0; i < arg->num_outputs; i++) {
2382 		struct drm_vmw_rect curr_rect;
2383 
2384 		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2385 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2386 		    (rects[i].y + rects[i].h > INT_MAX)) {
2387 			ret = -ERANGE;
2388 			goto out_free;
2389 		}
2390 
2391 		curr_rect = rects[i];
2392 		drm_rects[i].x1 = curr_rect.x;
2393 		drm_rects[i].y1 = curr_rect.y;
2394 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2395 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2396 
2397 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2398 			      drm_rects[i].x1, drm_rects[i].y1,
2399 			      drm_rects[i].x2, drm_rects[i].y2);
2400 
2401 		/*
2402 		 * Currently this check limits the topology to
2403 		 * mode_config->max_* (which actually is the max texture size
2404 		 * supported by the virtual device). This limit is here to address
2405 		 * window managers that create a single big framebuffer for the
2406 		 * whole topology.
2407 		 */
2408 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2409 		    drm_rects[i].x2 > mode_config->max_width ||
2410 		    drm_rects[i].y2 > mode_config->max_height) {
2411 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2412 				      drm_rects[i].x1, drm_rects[i].y1,
2413 				      drm_rects[i].x2, drm_rects[i].y2);
2414 			ret = -EINVAL;
2415 			goto out_free;
2416 		}
2417 	}
2418 
2419 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2420 
2421 	if (ret == 0)
2422 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2423 
2424 out_free:
2425 	kfree(rects);
2426 	return ret;
2427 }
2428 
2429 /**
2430  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2431  * on a set of cliprects and a set of display units.
2432  *
2433  * @dev_priv: Pointer to a device private structure.
2434  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2435  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2436  * Cliprects are given in framebuffer coordinates.
2437  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2438  * be NULL. Cliprects are given in source coordinates.
2439  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2440  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2441  * @num_clips: Number of cliprects in the @clips or @vclips array.
2442  * @increment: Integer with which to increment the clip counter when looping.
2443  * Used to skip a predetermined number of clip rects.
2444  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2445  */
2446 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2447 			 struct vmw_framebuffer *framebuffer,
2448 			 const struct drm_clip_rect *clips,
2449 			 const struct drm_vmw_rect *vclips,
2450 			 s32 dest_x, s32 dest_y,
2451 			 int num_clips,
2452 			 int increment,
2453 			 struct vmw_kms_dirty *dirty)
2454 {
2455 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2456 	struct drm_crtc *crtc;
2457 	u32 num_units = 0;
2458 	u32 i, k;
2459 
2460 	dirty->dev_priv = dev_priv;
2461 
2462 	/* If crtc is passed, no need to iterate over other display units */
2463 	if (dirty->crtc) {
2464 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2465 	} else {
2466 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2467 				    head) {
2468 			struct drm_plane *plane = crtc->primary;
2469 
2470 			if (plane->state->fb == &framebuffer->base)
2471 				units[num_units++] = vmw_crtc_to_du(crtc);
2472 		}
2473 	}
2474 
2475 	for (k = 0; k < num_units; k++) {
2476 		struct vmw_display_unit *unit = units[k];
2477 		s32 crtc_x = unit->crtc.x;
2478 		s32 crtc_y = unit->crtc.y;
2479 		s32 crtc_width = unit->crtc.mode.hdisplay;
2480 		s32 crtc_height = unit->crtc.mode.vdisplay;
2481 		const struct drm_clip_rect *clips_ptr = clips;
2482 		const struct drm_vmw_rect *vclips_ptr = vclips;
2483 
2484 		dirty->unit = unit;
2485 		if (dirty->fifo_reserve_size > 0) {
2486 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2487 						      dirty->fifo_reserve_size);
2488 			if (!dirty->cmd)
2489 				return -ENOMEM;
2490 
2491 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2492 		}
2493 		dirty->num_hits = 0;
2494 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2495 		       vclips_ptr += increment) {
2496 			s32 clip_left;
2497 			s32 clip_top;
2498 
2499 			/*
2500 			 * Select clip array type. Note that integer type
2501 			 * in @clips is unsigned short, whereas in @vclips
2502 			 * it's 32-bit.
2503 			 */
2504 			if (clips) {
2505 				dirty->fb_x = (s32) clips_ptr->x1;
2506 				dirty->fb_y = (s32) clips_ptr->y1;
2507 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2508 					crtc_x;
2509 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2510 					crtc_y;
2511 			} else {
2512 				dirty->fb_x = vclips_ptr->x;
2513 				dirty->fb_y = vclips_ptr->y;
2514 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2515 					dest_x - crtc_x;
2516 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2517 					dest_y - crtc_y;
2518 			}
2519 
2520 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2521 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2522 
2523 			/* Skip this clip if it's outside the crtc region */
2524 			if (dirty->unit_x1 >= crtc_width ||
2525 			    dirty->unit_y1 >= crtc_height ||
2526 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2527 				continue;
2528 
2529 			/* Clip right and bottom to crtc limits */
2530 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2531 					       crtc_width);
2532 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2533 					       crtc_height);
2534 
2535 			/* Clip left and top to crtc limits */
2536 			clip_left = min_t(s32, dirty->unit_x1, 0);
2537 			clip_top = min_t(s32, dirty->unit_y1, 0);
2538 			dirty->unit_x1 -= clip_left;
2539 			dirty->unit_y1 -= clip_top;
2540 			dirty->fb_x -= clip_left;
2541 			dirty->fb_y -= clip_top;
2542 
2543 			dirty->clip(dirty);
2544 		}
2545 
2546 		dirty->fifo_commit(dirty);
2547 	}
2548 
2549 	return 0;
2550 }
2551 
2552 /**
2553  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2554  * cleanup and fencing
2555  * @dev_priv: Pointer to the device-private struct
2556  * @file_priv: Pointer identifying the client when user-space fencing is used
2557  * @ctx: Pointer to the validation context
2558  * @out_fence: If non-NULL, returned refcounted fence-pointer
2559  * @user_fence_rep: If non-NULL, pointer to user-space address area
2560  * in which to copy user-space fence info
2561  */
2562 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2563 				      struct drm_file *file_priv,
2564 				      struct vmw_validation_context *ctx,
2565 				      struct vmw_fence_obj **out_fence,
2566 				      struct drm_vmw_fence_rep __user *
2567 				      user_fence_rep)
2568 {
2569 	struct vmw_fence_obj *fence = NULL;
2570 	uint32_t handle = 0;
2571 	int ret = 0;
2572 
2573 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2574 	    out_fence)
2575 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2576 						 file_priv ? &handle : NULL);
2577 	vmw_validation_done(ctx, fence);
2578 	if (file_priv)
2579 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2580 					    ret, user_fence_rep, fence,
2581 					    handle, -1);
2582 	if (out_fence)
2583 		*out_fence = fence;
2584 	else
2585 		vmw_fence_obj_unreference(&fence);
2586 }
2587 
2588 /**
2589  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2590  * its backing MOB.
2591  *
2592  * @res: Pointer to the surface resource
2593  * @clips: Clip rects in framebuffer (surface) space.
2594  * @num_clips: Number of clips in @clips.
2595  * @increment: Integer with which to increment the clip counter when looping.
2596  * Used to skip a predetermined number of clip rects.
2597  *
2598  * This function makes sure the proxy surface is updated from its backing MOB
2599  * using the region given by @clips. The surface resource @res and its backing
2600  * MOB need to be reserved and validated on call.
2601  */
2602 int vmw_kms_update_proxy(struct vmw_resource *res,
2603 			 const struct drm_clip_rect *clips,
2604 			 unsigned num_clips,
2605 			 int increment)
2606 {
2607 	struct vmw_private *dev_priv = res->dev_priv;
2608 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2609 	struct {
2610 		SVGA3dCmdHeader header;
2611 		SVGA3dCmdUpdateGBImage body;
2612 	} *cmd;
2613 	SVGA3dBox *box;
2614 	size_t copy_size = 0;
2615 	int i;
2616 
2617 	if (!clips)
2618 		return 0;
2619 
2620 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2621 	if (!cmd)
2622 		return -ENOMEM;
2623 
2624 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2625 		box = &cmd->body.box;
2626 
2627 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2628 		cmd->header.size = sizeof(cmd->body);
2629 		cmd->body.image.sid = res->id;
2630 		cmd->body.image.face = 0;
2631 		cmd->body.image.mipmap = 0;
2632 
2633 		if (clips->x1 > size->width || clips->x2 > size->width ||
2634 		    clips->y1 > size->height || clips->y2 > size->height) {
2635 			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2636 			return -EINVAL;
2637 		}
2638 
2639 		box->x = clips->x1;
2640 		box->y = clips->y1;
2641 		box->z = 0;
2642 		box->w = clips->x2 - clips->x1;
2643 		box->h = clips->y2 - clips->y1;
2644 		box->d = 1;
2645 
2646 		copy_size += sizeof(*cmd);
2647 	}
2648 
2649 	vmw_cmd_commit(dev_priv, copy_size);
2650 
2651 	return 0;
2652 }
2653 
2654 /**
2655  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2656  * property.
2657  *
2658  * @dev_priv: Pointer to a device private struct.
2659  *
2660  * Sets up the implicit placement property unless it's already set up.
2661  */
2662 void
2663 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2664 {
2665 	if (dev_priv->implicit_placement_property)
2666 		return;
2667 
2668 	dev_priv->implicit_placement_property =
2669 		drm_property_create_range(&dev_priv->drm,
2670 					  DRM_MODE_PROP_IMMUTABLE,
2671 					  "implicit_placement", 0, 1);
2672 }
2673 
2674 /**
2675  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2676  *
2677  * @dev: Pointer to the drm device
2678  * Return: 0 on success. Negative error code on failure.
2679  */
2680 int vmw_kms_suspend(struct drm_device *dev)
2681 {
2682 	struct vmw_private *dev_priv = vmw_priv(dev);
2683 
2684 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2685 	if (IS_ERR(dev_priv->suspend_state)) {
2686 		int ret = PTR_ERR(dev_priv->suspend_state);
2687 
2688 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2689 		dev_priv->suspend_state = NULL;
2690 
2691 		return ret;
2692 	}
2693 
2694 	return 0;
2695 }
2696 
2697 
2698 /**
2699  * vmw_kms_resume - Re-enable modesetting and restore state
2700  *
2701  * @dev: Pointer to the drm device
2702  * Return: 0 on success. Negative error code on failure.
2703  *
2704  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2705  * to call this function without a previous vmw_kms_suspend().
2706  */
2707 int vmw_kms_resume(struct drm_device *dev)
2708 {
2709 	struct vmw_private *dev_priv = vmw_priv(dev);
2710 	int ret;
2711 
2712 	if (WARN_ON(!dev_priv->suspend_state))
2713 		return 0;
2714 
2715 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2716 	dev_priv->suspend_state = NULL;
2717 
2718 	return ret;
2719 }
2720 
2721 /**
2722  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2723  *
2724  * @dev: Pointer to the drm device
2725  */
2726 void vmw_kms_lost_device(struct drm_device *dev)
2727 {
2728 	drm_atomic_helper_shutdown(dev);
2729 }
2730 
2731 /**
2732  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2733  * @update: The closure structure.
2734  *
2735  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2736  * update on display unit.
2737  *
2738  * Return: 0 on success or a negative error code on failure.
2739  */
2740 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2741 {
2742 	struct drm_plane_state *state = update->plane->state;
2743 	struct drm_plane_state *old_state = update->old_state;
2744 	struct drm_atomic_helper_damage_iter iter;
2745 	struct drm_rect clip;
2746 	struct drm_rect bb;
2747 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2748 	uint32_t reserved_size = 0;
2749 	uint32_t submit_size = 0;
2750 	uint32_t curr_size = 0;
2751 	uint32_t num_hits = 0;
2752 	void *cmd_start;
2753 	char *cmd_next;
2754 	int ret;
2755 
2756 	/*
2757 	 * Iterate in advance to check whether a plane update is really needed
2758 	 * and find the number of clips in the plane src for fifo allocation.
2759 	 */
2760 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2761 	drm_atomic_for_each_plane_damage(&iter, &clip)
2762 		num_hits++;
2763 
2764 	if (num_hits == 0)
2765 		return 0;
2766 
2767 	if (update->vfb->bo) {
2768 		struct vmw_framebuffer_bo *vfbbo =
2769 			container_of(update->vfb, typeof(*vfbbo), base);
2770 
2771 		/*
2772 		 * For screen targets we want a mappable bo, for everything else we
2773 		 * want an accelerated i.e. host-backed (vram or gmr) bo. If the display
2774 		 * unit is not a screen target then MOBs shouldn't be available.
2775 		 */
2776 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2777 			vmw_bo_placement_set(vfbbo->buffer,
2778 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2779 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2780 		} else {
2781 			WARN_ON(update->dev_priv->has_mob);
2782 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2783 		}
2784 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2785 	} else {
2786 		struct vmw_framebuffer_surface *vfbs =
2787 			container_of(update->vfb, typeof(*vfbs), base);
2788 
2789 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2790 						  0, VMW_RES_DIRTY_NONE, NULL,
2791 						  NULL);
2792 	}
2793 
2794 	if (ret)
2795 		return ret;
2796 
2797 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2798 	if (ret)
2799 		goto out_unref;
2800 
2801 	reserved_size = update->calc_fifo_size(update, num_hits);
2802 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2803 	if (!cmd_start) {
2804 		ret = -ENOMEM;
2805 		goto out_revert;
2806 	}
2807 
2808 	cmd_next = cmd_start;
2809 
2810 	if (update->post_prepare) {
2811 		curr_size = update->post_prepare(update, cmd_next);
2812 		cmd_next += curr_size;
2813 		submit_size += curr_size;
2814 	}
2815 
2816 	if (update->pre_clip) {
2817 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2818 		cmd_next += curr_size;
2819 		submit_size += curr_size;
2820 	}
2821 
2822 	bb.x1 = INT_MAX;
2823 	bb.y1 = INT_MAX;
2824 	bb.x2 = INT_MIN;
2825 	bb.y2 = INT_MIN;
2826 
2827 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2828 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2829 		uint32_t fb_x = clip.x1;
2830 		uint32_t fb_y = clip.y1;
2831 
2832 		vmw_du_translate_to_crtc(state, &clip);
2833 		if (update->clip) {
2834 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2835 						 fb_y);
2836 			cmd_next += curr_size;
2837 			submit_size += curr_size;
2838 		}
2839 		bb.x1 = min_t(int, bb.x1, clip.x1);
2840 		bb.y1 = min_t(int, bb.y1, clip.y1);
2841 		bb.x2 = max_t(int, bb.x2, clip.x2);
2842 		bb.y2 = max_t(int, bb.y2, clip.y2);
2843 	}
2844 
2845 	curr_size = update->post_clip(update, cmd_next, &bb);
2846 	submit_size += curr_size;
2847 
2848 	if (reserved_size < submit_size)
2849 		submit_size = 0;
2850 
2851 	vmw_cmd_commit(update->dev_priv, submit_size);
2852 
2853 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2854 					 update->out_fence, NULL);
2855 	return ret;
2856 
2857 out_revert:
2858 	vmw_validation_revert(&val_ctx);
2859 
2860 out_unref:
2861 	vmw_validation_unref_lists(&val_ctx);
2862 	return ret;
2863 }
2864 
2865 /**
2866  * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2867  *
2868  * @connector: the drm connector, part of a DU container
2869  * @mode: drm mode to check
2870  *
2871  * Returns MODE_OK on success, or a drm_mode_status error code.
2872  */
2873 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2874 					      struct drm_display_mode *mode)
2875 {
2876 	struct drm_device *dev = connector->dev;
2877 	struct vmw_private *dev_priv = vmw_priv(dev);
2878 	u32 max_width = dev_priv->texture_max_width;
2879 	u32 max_height = dev_priv->texture_max_height;
2880 	u32 assumed_cpp = 4;
2881 
2882 	if (dev_priv->assume_16bpp)
2883 		assumed_cpp = 2;
2884 
2885 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2886 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2887 		max_height = min(dev_priv->stdu_max_height, max_height);
2888 	}
2889 
2890 	if (max_width < mode->hdisplay)
2891 		return MODE_BAD_HVALUE;
2892 
2893 	if (max_height < mode->vdisplay)
2894 		return MODE_BAD_VVALUE;
2895 
2896 	if (!vmw_kms_validate_mode_vram(dev_priv,
2897 					mode->hdisplay * assumed_cpp,
2898 					mode->vdisplay))
2899 		return MODE_MEM;
2900 
2901 	return MODE_OK;
2902 }
2903 
2904 /**
2905  * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2906  *
2907  * @connector: the drm connector, part of a DU container
2908  *
2909  * Returns the number of added modes.
2910  */
2911 int vmw_connector_get_modes(struct drm_connector *connector)
2912 {
2913 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2914 	struct drm_device *dev = connector->dev;
2915 	struct vmw_private *dev_priv = vmw_priv(dev);
2916 	struct drm_display_mode *mode = NULL;
2917 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2918 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2919 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2920 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2921 	};
2922 	u32 max_width;
2923 	u32 max_height;
2924 	u32 num_modes;
2925 
2926 	/* Add preferred mode */
2927 	mode = drm_mode_duplicate(dev, &prefmode);
2928 	if (!mode)
2929 		return 0;
2930 
2931 	mode->hdisplay = du->pref_width;
2932 	mode->vdisplay = du->pref_height;
2933 	vmw_guess_mode_timing(mode);
2934 	drm_mode_set_name(mode);
2935 
2936 	drm_mode_probed_add(connector, mode);
2937 	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2938 
2939 	/* Probe connector for all modes not exceeding our geom limits */
2940 	max_width  = dev_priv->texture_max_width;
2941 	max_height = dev_priv->texture_max_height;
2942 
2943 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2944 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2945 		max_height = min(dev_priv->stdu_max_height, max_height);
2946 	}
2947 
2948 	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2949 
2950 	return num_modes;
2951 }
2952