xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include "vmwgfx_kms.h"
28 
29 #include "vmwgfx_bo.h"
30 #include "vmw_surface_cache.h"
31 
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_damage_helper.h>
35 #include <drm/drm_fourcc.h>
36 #include <drm/drm_rect.h>
37 #include <drm/drm_sysfs.h>
38 #include <drm/drm_edid.h>
39 
40 void vmw_du_cleanup(struct vmw_display_unit *du)
41 {
42 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
43 	drm_plane_cleanup(&du->primary);
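	/* The cursor plane is only set up for devices with command support. */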
44 	if (vmw_cmd_supported(dev_priv))
45 		drm_plane_cleanup(&du->cursor.base);
46 
47 	drm_connector_unregister(&du->connector);
48 	drm_crtc_cleanup(&du->crtc);
49 	drm_encoder_cleanup(&du->encoder);
50 	drm_connector_cleanup(&du->connector);
51 }
52 
53 /*
54  * Display Unit Cursor functions
55  */
56 
57 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
58 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
59 				  struct vmw_plane_state *vps,
60 				  u32 *image, u32 width, u32 height,
61 				  u32 hotspotX, u32 hotspotY);
62 
63 struct vmw_svga_fifo_cmd_define_cursor {
64 	u32 cmd;
65 	SVGAFifoCmdDefineAlphaCursor cursor;
66 };
67 
68 /**
69  * vmw_send_define_cursor_cmd - queue a define cursor command
70  * @dev_priv: the private driver struct
71  * @image: buffer which holds the cursor image
72  * @width: width of the mouse cursor image
73  * @height: height of the mouse cursor image
74  * @hotspotX: the horizontal position of mouse hotspot
75  * @hotspotY: the vertical position of mouse hotspot
76  */
77 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
78 				       u32 *image, u32 width, u32 height,
79 				       u32 hotspotX, u32 hotspotY)
80 {
81 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
82 	const u32 image_size = width * height * sizeof(*image);
83 	const u32 cmd_size = sizeof(*cmd) + image_size;
84 
85 	/* Try to reserve fifocmd space and swallow any failures;
86 	   such reservations cannot be left unconsumed for long
87 	   without risking clogging other fifocmd users, so
88 	   we treat reservations separately from the way we treat
89 	   other fallible KMS-atomic resources at prepare_fb */
90 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
91 
92 	if (unlikely(!cmd))
93 		return;
94 
95 	memset(cmd, 0, sizeof(*cmd));
96 
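	/* The cursor image data follows the command header in the reservation. */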
97 	memcpy(&cmd[1], image, image_size);
98 
99 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
100 	cmd->cursor.id = 0;
101 	cmd->cursor.width = width;
102 	cmd->cursor.height = height;
103 	cmd->cursor.hotspotX = hotspotX;
104 	cmd->cursor.hotspotY = hotspotY;
105 
106 	vmw_cmd_commit_flush(dev_priv, cmd_size);
107 }
108 
109 /**
110  * vmw_cursor_update_image - update the cursor image on the provided plane
111  * @dev_priv: the private driver struct
112  * @vps: the plane state of the cursor plane
113  * @image: buffer which holds the cursor image
114  * @width: width of the mouse cursor image
115  * @height: height of the mouse cursor image
116  * @hotspotX: the horizontal position of mouse hotspot
117  * @hotspotY: the vertical position of mouse hotspot
118  */
119 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
120 				    struct vmw_plane_state *vps,
121 				    u32 *image, u32 width, u32 height,
122 				    u32 hotspotX, u32 hotspotY)
123 {
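	/*
	 * Prefer the cursor MOB when one has been set up; otherwise fall
	 * back to the legacy DEFINE_ALPHA_CURSOR FIFO command.
	 */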
124 	if (vps->cursor.bo)
125 		vmw_cursor_update_mob(dev_priv, vps, image,
126 				      vps->base.crtc_w, vps->base.crtc_h,
127 				      hotspotX, hotspotY);
128 
129 	else
130 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
131 					   hotspotX, hotspotY);
132 }
133 
134 
135 /**
136  * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
137  *
138  * Called from inside vmw_du_cursor_plane_atomic_update to actually
139  * make the cursor-image live.
140  *
141  * @dev_priv: device to work with
142  * @vps: the plane state of the cursor plane
143  * @image: cursor source data to fill the MOB with
144  * @width: source data width
145  * @height: source data height
146  * @hotspotX: cursor hotspot x
147  * @hotspotY: cursor hotspot y
148  */
149 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
150 				  struct vmw_plane_state *vps,
151 				  u32 *image, u32 width, u32 height,
152 				  u32 hotspotX, u32 hotspotY)
153 {
154 	SVGAGBCursorHeader *header;
155 	SVGAGBAlphaCursorHeader *alpha_header;
156 	const u32 image_size = width * height * sizeof(*image);
157 
158 	header = vmw_bo_map_and_cache(vps->cursor.bo);
159 	alpha_header = &header->header.alphaHeader;
160 
161 	memset(header, 0, sizeof(*header));
162 
163 	header->type = SVGA_ALPHA_CURSOR;
164 	header->sizeInBytes = image_size;
165 
166 	alpha_header->hotspotX = hotspotX;
167 	alpha_header->hotspotY = hotspotY;
168 	alpha_header->width = width;
169 	alpha_header->height = height;
170 
171 	memcpy(header + 1, image, image_size);
172 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
173 		  vps->cursor.bo->tbo.resource->start);
174 }
175 
176 
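/* A cursor MOB holds an SVGAGBCursorHeader followed by a 32bpp image. */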
177 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
178 {
179 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
180 }
181 
182 /**
183  * vmw_du_cursor_plane_acquire_image -- Acquire the image data
184  * @vps: cursor plane state
185  */
186 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
187 {
188 	if (vps->surf) {
189 		if (vps->surf_mapped)
190 			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
191 		return vps->surf->snooper.image;
192 	} else if (vps->bo)
193 		return vmw_bo_map_and_cache(vps->bo);
194 	return NULL;
195 }
196 
197 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
198 					    struct vmw_plane_state *new_vps)
199 {
200 	void *old_image;
201 	void *new_image;
202 	u32 size;
203 	bool changed;
204 
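	/* Cheap checks first; only compare pixel data if size and hotspot match. */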
205 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
206 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
207 		return true;
208 
209 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
210 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
211 		return true;
212 
213 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
214 
215 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
216 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
217 
218 	changed = false;
219 	if (old_image && new_image)
220 		changed = memcmp(old_image, new_image, size) != 0;
221 
222 	return changed;
223 }
224 
225 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
226 {
227 	if (!(*vbo))
228 		return;
229 
230 	ttm_bo_unpin(&(*vbo)->tbo);
231 	vmw_bo_unreference(vbo);
232 }
233 
234 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
235 				  struct vmw_plane_state *vps)
236 {
237 	u32 i;
238 
239 	if (!vps->cursor.bo)
240 		return;
241 
242 	vmw_du_cursor_plane_unmap_cm(vps);
243 
244 	/* Look for a free slot to return this mob to the cache. */
245 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
246 		if (!vcp->cursor_mobs[i]) {
247 			vcp->cursor_mobs[i] = vps->cursor.bo;
248 			vps->cursor.bo = NULL;
249 			return;
250 		}
251 	}
252 
253 	/* Cache is full: See if this mob is bigger than an existing mob. */
254 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
255 		if (vcp->cursor_mobs[i]->tbo.base.size <
256 		    vps->cursor.bo->tbo.base.size) {
257 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
258 			vcp->cursor_mobs[i] = vps->cursor.bo;
259 			vps->cursor.bo = NULL;
260 			return;
261 		}
262 	}
263 
264 	/* Destroy it if it's not worth caching. */
265 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
266 }
267 
268 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
269 				 struct vmw_plane_state *vps)
270 {
271 	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
272 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
273 	u32 i;
274 	u32 cursor_max_dim, mob_max_size;
275 	struct vmw_fence_obj *fence = NULL;
276 	int ret;
277 
278 	if (!dev_priv->has_mob ||
279 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
280 		return -EINVAL;
281 
282 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
283 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
284 
285 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
286 	    vps->base.crtc_h > cursor_max_dim)
287 		return -EINVAL;
288 
289 	if (vps->cursor.bo) {
290 		if (vps->cursor.bo->tbo.base.size >= size)
291 			return 0;
292 		vmw_du_put_cursor_mob(vcp, vps);
293 	}
294 
295 	/* Look for an unused mob in the cache. */
296 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
297 		if (vcp->cursor_mobs[i] &&
298 		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
299 			vps->cursor.bo = vcp->cursor_mobs[i];
300 			vcp->cursor_mobs[i] = NULL;
301 			return 0;
302 		}
303 	}
304 	/* Create a new mob if we can't find an existing one. */
305 	ret = vmw_bo_create_and_populate(dev_priv, size,
306 					 VMW_BO_DOMAIN_MOB,
307 					 &vps->cursor.bo);
308 
309 	if (ret != 0)
310 		return ret;
311 
312 	/* Fence the mob creation so we are guaranteed to have the mob */
313 	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
314 	if (ret != 0)
315 		goto teardown;
316 
317 	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
318 	if (ret != 0) {
319 		ttm_bo_unreserve(&vps->cursor.bo->tbo);
320 		goto teardown;
321 	}
322 
323 	dma_fence_wait(&fence->base, false);
324 	dma_fence_put(&fence->base);
325 
326 	ttm_bo_unreserve(&vps->cursor.bo->tbo);
327 	return 0;
328 
329 teardown:
330 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
331 	return ret;
332 }
333 
334 
335 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
336 				       bool show, int x, int y)
337 {
338 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
339 					     : SVGA_CURSOR_ON_HIDE;
340 	uint32_t count;
341 
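	/*
	 * Use the newest supported mechanism: the CURSOR4 registers, then
	 * cursor bypass 3 via FIFO registers, then the legacy cursor registers.
	 */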
342 	spin_lock(&dev_priv->cursor_lock);
343 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
344 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
345 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
346 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
347 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
348 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
349 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
350 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
351 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
352 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
353 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
354 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
355 	} else {
356 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
357 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
358 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
359 	}
360 	spin_unlock(&dev_priv->cursor_lock);
361 }
362 
363 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
364 			  struct ttm_object_file *tfile,
365 			  struct ttm_buffer_object *bo,
366 			  SVGA3dCmdHeader *header)
367 {
368 	struct ttm_bo_kmap_obj map;
369 	unsigned long kmap_offset;
370 	unsigned long kmap_num;
371 	SVGA3dCopyBox *box;
372 	unsigned box_count;
373 	void *virtual;
374 	bool is_iomem;
375 	struct vmw_dma_cmd {
376 		SVGA3dCmdHeader header;
377 		SVGA3dCmdSurfaceDMA dma;
378 	} *cmd;
379 	int i, ret;
380 	const struct SVGA3dSurfaceDesc *desc =
381 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
382 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
383 
384 	cmd = container_of(header, struct vmw_dma_cmd, header);
385 
386 	/* No snooper installed, nothing to copy */
387 	if (!srf->snooper.image)
388 		return;
389 
390 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
391 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
392 		return;
393 	}
394 
395 	if (cmd->header.size < 64) {
396 		DRM_ERROR("at least one full copy box must be given\n");
397 		return;
398 	}
399 
400 	box = (SVGA3dCopyBox *)&cmd[1];
401 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
402 			sizeof(SVGA3dCopyBox);
403 
404 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
405 	    box->x != 0    || box->y != 0    || box->z != 0    ||
406 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
407 	    box->d != 1    || box_count != 1 ||
408 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
409 		/* TODO handle non page-aligned offsets */
410 		/* TODO handle more dst & src != 0 */
411 		/* TODO handle more than one copy */
412 		DRM_ERROR("Can't snoop dma request for cursor!\n");
413 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
414 			  box->srcx, box->srcy, box->srcz,
415 			  box->x, box->y, box->z,
416 			  box->w, box->h, box->d, box_count,
417 			  cmd->dma.guest.ptr.offset);
418 		return;
419 	}
420 
421 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
422 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
423 
424 	ret = ttm_bo_reserve(bo, true, false, NULL);
425 	if (unlikely(ret != 0)) {
426 		DRM_ERROR("reserve failed\n");
427 		return;
428 	}
429 
430 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
431 	if (unlikely(ret != 0))
432 		goto err_unreserve;
433 
434 	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
435 
436 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
437 		memcpy(srf->snooper.image, virtual,
438 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
439 	} else {
440 		/* Image is unsigned pointer. */
441 		for (i = 0; i < box->h; i++)
442 			memcpy(srf->snooper.image + i * image_pitch,
443 			       virtual + i * cmd->dma.guest.pitch,
444 			       box->w * desc->pitchBytesPerBlock);
445 	}
446 
447 	srf->snooper.age++;
448 
449 	ttm_bo_kunmap(&map);
450 err_unreserve:
451 	ttm_bo_unreserve(bo);
452 }
453 
454 /**
455  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
456  *
457  * @dev_priv: Pointer to the device private struct.
458  *
459  * Clears all legacy hotspots.
460  */
461 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
462 {
463 	struct drm_device *dev = &dev_priv->drm;
464 	struct vmw_display_unit *du;
465 	struct drm_crtc *crtc;
466 
467 	drm_modeset_lock_all(dev);
468 	drm_for_each_crtc(crtc, dev) {
469 		du = vmw_crtc_to_du(crtc);
470 
471 		du->hotspot_x = 0;
472 		du->hotspot_y = 0;
473 	}
474 	drm_modeset_unlock_all(dev);
475 }
476 
477 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
478 {
479 	struct drm_device *dev = &dev_priv->drm;
480 	struct vmw_display_unit *du;
481 	struct drm_crtc *crtc;
482 
483 	mutex_lock(&dev->mode_config.mutex);
484 
485 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
486 		du = vmw_crtc_to_du(crtc);
487 		if (!du->cursor_surface ||
488 		    du->cursor_age == du->cursor_surface->snooper.age ||
489 		    !du->cursor_surface->snooper.image)
490 			continue;
491 
492 		du->cursor_age = du->cursor_surface->snooper.age;
493 		vmw_send_define_cursor_cmd(dev_priv,
494 					   du->cursor_surface->snooper.image,
495 					   VMW_CURSOR_SNOOP_WIDTH,
496 					   VMW_CURSOR_SNOOP_HEIGHT,
497 					   du->hotspot_x + du->core_hotspot_x,
498 					   du->hotspot_y + du->core_hotspot_y);
499 	}
500 
501 	mutex_unlock(&dev->mode_config.mutex);
502 }
503 
504 
505 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
506 {
507 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
508 	u32 i;
509 
510 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
511 
512 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
513 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
514 
515 	drm_plane_cleanup(plane);
516 }
517 
518 
519 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
520 {
521 	drm_plane_cleanup(plane);
522 
523 	/* Planes are static in our case so we don't free it */
524 }
525 
526 
527 /**
528  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
529  *
530  * @vps: plane state associated with the display surface
531  * @unreference: true if we also want to unreference the display surface.
532  */
533 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
534 			     bool unreference)
535 {
536 	if (vps->surf) {
537 		if (vps->pinned) {
538 			vmw_resource_unpin(&vps->surf->res);
539 			vps->pinned--;
540 		}
541 
542 		if (unreference) {
543 			if (vps->pinned)
544 				DRM_ERROR("Surface still pinned\n");
545 			vmw_surface_unreference(&vps->surf);
546 		}
547 	}
548 }
549 
550 
551 /**
552  * vmw_du_plane_cleanup_fb - Unpins the plane surface
553  *
554  * @plane:  display plane
555  * @old_state: Contains the FB to clean up
556  *
557  * Unpins the framebuffer surface
560  */
561 void
562 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
563 			struct drm_plane_state *old_state)
564 {
565 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
566 
567 	vmw_du_plane_unpin_surf(vps, false);
568 }
569 
570 
571 /**
572  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
573  *
574  * @vps: plane_state
575  *
576  * Returns 0 on success
577  */
578 
579 static int
580 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
581 {
582 	int ret;
583 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
584 	struct ttm_buffer_object *bo;
585 
586 	if (!vps->cursor.bo)
587 		return -EINVAL;
588 
589 	bo = &vps->cursor.bo->tbo;
590 
591 	if (bo->base.size < size)
592 		return -EINVAL;
593 
594 	if (vps->cursor.bo->map.virtual)
595 		return 0;
596 
597 	ret = ttm_bo_reserve(bo, false, false, NULL);
598 	if (unlikely(ret != 0))
599 		return -ENOMEM;
600 
601 	vmw_bo_map_and_cache(vps->cursor.bo);
602 
603 	ttm_bo_unreserve(bo);
604 
605 	if (unlikely(ret != 0))
606 		return -ENOMEM;
607 
608 	return 0;
609 }
610 
611 
612 /**
613  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
614  *
615  * @vps: state of the cursor plane
616  *
617  * Returns 0 on success
618  */
619 
620 static int
621 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
622 {
623 	int ret = 0;
624 	struct vmw_bo *vbo = vps->cursor.bo;
625 
626 	if (!vbo || !vbo->map.virtual)
627 		return 0;
628 
629 	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
630 	if (likely(ret == 0)) {
631 		vmw_bo_unmap(vbo);
632 		ttm_bo_unreserve(&vbo->tbo);
633 	}
634 
635 	return ret;
636 }
637 
638 
639 /**
640  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
641  *
642  * @plane: cursor plane
643  * @old_state: contains the state to clean up
644  *
645  * Unmaps all cursor bo mappings and unpins the cursor surface
648  */
649 void
650 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
651 			       struct drm_plane_state *old_state)
652 {
653 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
654 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
655 
656 	if (vps->surf_mapped) {
657 		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
658 		vps->surf_mapped = false;
659 	}
660 
661 	vmw_du_cursor_plane_unmap_cm(vps);
662 	vmw_du_put_cursor_mob(vcp, vps);
663 
664 	vmw_du_plane_unpin_surf(vps, false);
665 
666 	if (vps->surf) {
667 		vmw_surface_unreference(&vps->surf);
668 		vps->surf = NULL;
669 	}
670 
671 	if (vps->bo) {
672 		vmw_bo_unreference(&vps->bo);
673 		vps->bo = NULL;
674 	}
675 }
676 
677 
678 /**
679  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
680  *
681  * @plane:  display plane
682  * @new_state: info on the new plane state, including the FB
683  *
684  * Returns 0 on success
685  */
686 int
687 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
688 			       struct drm_plane_state *new_state)
689 {
690 	struct drm_framebuffer *fb = new_state->fb;
691 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
692 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
693 	int ret = 0;
694 
695 	if (vps->surf) {
696 		if (vps->surf_mapped) {
697 			vmw_bo_unmap(vps->surf->res.guest_memory_bo);
698 			vps->surf_mapped = false;
699 		}
700 		vmw_surface_unreference(&vps->surf);
701 		vps->surf = NULL;
702 	}
703 
704 	if (vps->bo) {
705 		vmw_bo_unreference(&vps->bo);
706 		vps->bo = NULL;
707 	}
708 
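	/* Take a reference on the new framebuffer's backing bo or surface. */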
709 	if (fb) {
710 		if (vmw_framebuffer_to_vfb(fb)->bo) {
711 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
712 			vmw_bo_reference(vps->bo);
713 		} else {
714 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
715 			vmw_surface_reference(vps->surf);
716 		}
717 	}
718 
719 	if (!vps->surf && vps->bo) {
720 		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
721 
722 		/*
723 		 * Not using vmw_bo_map_and_cache() helper here as we need to
724 		 * reserve the ttm_buffer_object first which
725 		 * vmw_bo_map_and_cache() omits.
726 		 */
727 		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
728 
729 		if (unlikely(ret != 0))
730 			return -ENOMEM;
731 
732 		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
733 
734 		ttm_bo_unreserve(&vps->bo->tbo);
735 
736 		if (unlikely(ret != 0))
737 			return -ENOMEM;
738 	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
739 
740 		WARN_ON(vps->surf->snooper.image);
741 		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
742 				     NULL);
743 		if (unlikely(ret != 0))
744 			return -ENOMEM;
745 		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
746 		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
747 		vps->surf_mapped = true;
748 	}
749 
750 	if (vps->surf || vps->bo) {
751 		vmw_du_get_cursor_mob(vcp, vps);
752 		vmw_du_cursor_plane_map_cm(vps);
753 	}
754 
755 	return 0;
756 }
757 
758 
759 void
760 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
761 				  struct drm_atomic_state *state)
762 {
763 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
764 									   plane);
765 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
766 									   plane);
767 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
768 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
769 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
770 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
771 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
772 	s32 hotspot_x, hotspot_y;
773 
774 	hotspot_x = du->hotspot_x + new_state->hotspot_x;
775 	hotspot_y = du->hotspot_y + new_state->hotspot_y;
776 
777 	du->cursor_surface = vps->surf;
778 
779 	if (!vps->surf && !vps->bo) {
780 		vmw_cursor_update_position(dev_priv, false, 0, 0);
781 		return;
782 	}
783 
784 	vps->cursor.hotspot_x = hotspot_x;
785 	vps->cursor.hotspot_y = hotspot_y;
786 
787 	if (vps->surf) {
788 		du->cursor_age = du->cursor_surface->snooper.age;
789 	}
790 
791 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
792 		/*
793 		 * If it hasn't changed, avoid making the device do extra
794 		 * work by keeping the old cursor active.
795 		 */
796 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
797 		old_vps->cursor = vps->cursor;
798 		vps->cursor = tmp;
799 	} else {
800 		void *image = vmw_du_cursor_plane_acquire_image(vps);
801 		if (image)
802 			vmw_cursor_update_image(dev_priv, vps, image,
803 						new_state->crtc_w,
804 						new_state->crtc_h,
805 						hotspot_x, hotspot_y);
806 	}
807 
808 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
809 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
810 
811 	vmw_cursor_update_position(dev_priv, true,
812 				   du->cursor_x + hotspot_x,
813 				   du->cursor_y + hotspot_y);
814 
815 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
816 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
817 }
818 
819 
820 /**
821  * vmw_du_primary_plane_atomic_check - check if the new state is okay
822  *
823  * @plane: display plane
824  * @state: info on the new plane state, including the FB
825  *
826  * Check if the new state is settable given the current state.  Other
827  * than what the atomic helper checks, we care about the crtc fitting
828  * the FB and maintaining one active framebuffer.
829  *
830  * Returns 0 on success
831  */
832 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
833 				      struct drm_atomic_state *state)
834 {
835 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
836 									   plane);
837 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
838 									   plane);
839 	struct drm_crtc_state *crtc_state = NULL;
840 	struct drm_framebuffer *new_fb = new_state->fb;
841 	struct drm_framebuffer *old_fb = old_state->fb;
842 	int ret;
843 
844 	/*
845 	 * Ignore damage clips if the framebuffer attached to the plane's state
846 	 * has changed since the last plane update (page-flip). In this case, a
847 	 * full plane update should happen because uploads are done per-buffer.
848 	 */
849 	if (old_fb != new_fb)
850 		new_state->ignore_damage_clips = true;
851 
852 	if (new_state->crtc)
853 		crtc_state = drm_atomic_get_new_crtc_state(state,
854 							   new_state->crtc);
855 
856 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
857 						  DRM_PLANE_NO_SCALING,
858 						  DRM_PLANE_NO_SCALING,
859 						  false, true);
860 	return ret;
861 }
862 
863 
864 /**
865  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
866  *
867  * @plane: cursor plane
868  * @state: info on the new plane state
869  *
870  * This is a chance to fail if the new cursor state does not fit
871  * our requirements.
872  *
873  * Returns 0 on success
874  */
875 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
876 				     struct drm_atomic_state *state)
877 {
878 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
879 									   plane);
880 	int ret = 0;
881 	struct drm_crtc_state *crtc_state = NULL;
882 	struct vmw_surface *surface = NULL;
883 	struct drm_framebuffer *fb = new_state->fb;
884 
885 	if (new_state->crtc)
886 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
887 							   new_state->crtc);
888 
889 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
890 						  DRM_PLANE_NO_SCALING,
891 						  DRM_PLANE_NO_SCALING,
892 						  true, true);
893 	if (ret)
894 		return ret;
895 
896 	/* Turning off */
897 	if (!fb)
898 		return 0;
899 
900 	/* A lot of the code assumes this */
901 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
902 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
903 			  new_state->crtc_w, new_state->crtc_h);
904 		return -EINVAL;
905 	}
906 
907 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
908 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
909 
910 		WARN_ON(!surface);
911 
912 		if (!surface ||
913 		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
914 			DRM_ERROR("surface not suitable for cursor\n");
915 			return -EINVAL;
916 		}
917 	}
918 
919 	return 0;
920 }
921 
922 
923 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
924 			     struct drm_atomic_state *state)
925 {
926 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
927 									 crtc);
928 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
929 	int connector_mask = drm_connector_mask(&du->connector);
930 	bool has_primary = new_state->plane_mask &
931 			   drm_plane_mask(crtc->primary);
932 
933 	/* We always want to have an active plane with an active CRTC */
934 	if (has_primary != new_state->enable)
935 		return -EINVAL;
936 
937 
938 	if (new_state->connector_mask != connector_mask &&
939 	    new_state->connector_mask != 0) {
940 		DRM_ERROR("Invalid connectors configuration\n");
941 		return -EINVAL;
942 	}
943 
944 	/*
945 	 * Our virtual device does not have a dot clock, so use the logical
946 	 * clock value as the dot clock.
947 	 */
948 	if (new_state->mode.crtc_clock == 0)
949 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
950 
951 	return 0;
952 }
953 
954 
955 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
956 			      struct drm_atomic_state *state)
957 {
958 }
959 
960 
961 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
962 			      struct drm_atomic_state *state)
963 {
964 }
965 
966 
967 /**
968  * vmw_du_crtc_duplicate_state - duplicate crtc state
969  * @crtc: DRM crtc
970  *
971  * Allocates and returns a copy of the crtc state (both common and
972  * vmw-specific) for the specified crtc.
973  *
974  * Returns: The newly allocated crtc state, or NULL on failure.
975  */
976 struct drm_crtc_state *
977 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
978 {
979 	struct drm_crtc_state *state;
980 	struct vmw_crtc_state *vcs;
981 
982 	if (WARN_ON(!crtc->state))
983 		return NULL;
984 
985 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
986 
987 	if (!vcs)
988 		return NULL;
989 
990 	state = &vcs->base;
991 
992 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
993 
994 	return state;
995 }
996 
997 
998 /**
999  * vmw_du_crtc_reset - creates a blank vmw crtc state
1000  * @crtc: DRM crtc
1001  *
1002  * Resets the atomic state for @crtc by freeing the state pointer (which
1003  * might be NULL, e.g. at driver load time) and allocating a new empty state
1004  * object.
1005  */
1006 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1007 {
1008 	struct vmw_crtc_state *vcs;
1009 
1010 
1011 	if (crtc->state) {
1012 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1013 
1014 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1015 	}
1016 
1017 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1018 
1019 	if (!vcs) {
1020 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1021 		return;
1022 	}
1023 
1024 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1025 }
1026 
1027 
1028 /**
1029  * vmw_du_crtc_destroy_state - destroy crtc state
1030  * @crtc: DRM crtc
1031  * @state: state object to destroy
1032  *
1033  * Destroys the crtc state (both common and vmw-specific) for the
1034  * specified crtc.
1035  */
1036 void
1037 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1038 			  struct drm_crtc_state *state)
1039 {
1040 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1041 }
1042 
1043 
1044 /**
1045  * vmw_du_plane_duplicate_state - duplicate plane state
1046  * @plane: drm plane
1047  *
1048  * Allocates and returns a copy of the plane state (both common and
1049  * vmw-specific) for the specified plane.
1050  *
1051  * Returns: The newly allocated plane state, or NULL on failure.
1052  */
1053 struct drm_plane_state *
1054 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1055 {
1056 	struct drm_plane_state *state;
1057 	struct vmw_plane_state *vps;
1058 
1059 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1060 
1061 	if (!vps)
1062 		return NULL;
1063 
1064 	vps->pinned = 0;
1065 	vps->cpp = 0;
1066 
1067 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1068 
1069 	/* Each ref counted resource needs to be acquired again */
1070 	if (vps->surf)
1071 		(void) vmw_surface_reference(vps->surf);
1072 
1073 	if (vps->bo)
1074 		(void) vmw_bo_reference(vps->bo);
1075 
1076 	state = &vps->base;
1077 
1078 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1079 
1080 	return state;
1081 }
1082 
1083 
1084 /**
1085  * vmw_du_plane_reset - creates a blank vmw plane state
1086  * @plane: drm plane
1087  *
1088  * Resets the atomic state for @plane by freeing the state pointer (which might
1089  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1090  */
1091 void vmw_du_plane_reset(struct drm_plane *plane)
1092 {
1093 	struct vmw_plane_state *vps;
1094 
1095 	if (plane->state)
1096 		vmw_du_plane_destroy_state(plane, plane->state);
1097 
1098 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1099 
1100 	if (!vps) {
1101 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1102 		return;
1103 	}
1104 
1105 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1106 }
1107 
1108 
1109 /**
1110  * vmw_du_plane_destroy_state - destroy plane state
1111  * @plane: DRM plane
1112  * @state: state object to destroy
1113  *
1114  * Destroys the plane state (both common and vmw-specific) for the
1115  * specified plane.
1116  */
1117 void
1118 vmw_du_plane_destroy_state(struct drm_plane *plane,
1119 			   struct drm_plane_state *state)
1120 {
1121 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1122 
1123 	/* Should have been freed by cleanup_fb */
1124 	if (vps->surf)
1125 		vmw_surface_unreference(&vps->surf);
1126 
1127 	if (vps->bo)
1128 		vmw_bo_unreference(&vps->bo);
1129 
1130 	drm_atomic_helper_plane_destroy_state(plane, state);
1131 }
1132 
1133 
1134 /**
1135  * vmw_du_connector_duplicate_state - duplicate connector state
1136  * @connector: DRM connector
1137  *
1138  * Allocates and returns a copy of the connector state (both common and
1139  * vmw-specific) for the specified connector.
1140  *
1141  * Returns: The newly allocated connector state, or NULL on failure.
1142  */
1143 struct drm_connector_state *
1144 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1145 {
1146 	struct drm_connector_state *state;
1147 	struct vmw_connector_state *vcs;
1148 
1149 	if (WARN_ON(!connector->state))
1150 		return NULL;
1151 
1152 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1153 
1154 	if (!vcs)
1155 		return NULL;
1156 
1157 	state = &vcs->base;
1158 
1159 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1160 
1161 	return state;
1162 }
1163 
1164 
1165 /**
1166  * vmw_du_connector_reset - creates a blank vmw connector state
1167  * @connector: DRM connector
1168  *
1169  * Resets the atomic state for @connector by freeing the state pointer (which
1170  * might be NULL, e.g. at driver load time) and allocating a new empty state
1171  * object.
1172  */
1173 void vmw_du_connector_reset(struct drm_connector *connector)
1174 {
1175 	struct vmw_connector_state *vcs;
1176 
1177 
1178 	if (connector->state) {
1179 		__drm_atomic_helper_connector_destroy_state(connector->state);
1180 
1181 		kfree(vmw_connector_state_to_vcs(connector->state));
1182 	}
1183 
1184 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1185 
1186 	if (!vcs) {
1187 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1188 		return;
1189 	}
1190 
1191 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1192 }
1193 
1194 
1195 /**
1196  * vmw_du_connector_destroy_state - destroy connector state
1197  * @connector: DRM connector
1198  * @state: state object to destroy
1199  *
1200  * Destroys the connector state (both common and vmw-specific) for the
1201  * specified connector.
1202  */
1203 void
1204 vmw_du_connector_destroy_state(struct drm_connector *connector,
1205 			  struct drm_connector_state *state)
1206 {
1207 	drm_atomic_helper_connector_destroy_state(connector, state);
1208 }
1209 /*
1210  * Generic framebuffer code
1211  */
1212 
1213 /*
1214  * Surface framebuffer code
1215  */
1216 
1217 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1218 {
1219 	struct vmw_framebuffer_surface *vfbs =
1220 		vmw_framebuffer_to_vfbs(framebuffer);
1221 
1222 	drm_framebuffer_cleanup(framebuffer);
1223 	vmw_surface_unreference(&vfbs->surface);
1224 
1225 	kfree(vfbs);
1226 }
1227 
1228 /**
1229  * vmw_kms_readback - Perform a readback from the screen system to
1230  * a buffer-object backed framebuffer.
1231  *
1232  * @dev_priv: Pointer to the device private structure.
1233  * @file_priv: Pointer to a struct drm_file identifying the caller.
1234  * Must be set to NULL if @user_fence_rep is NULL.
1235  * @vfb: Pointer to the buffer-object backed framebuffer.
1236  * @user_fence_rep: User-space provided structure for fence information.
1237  * Must be set to non-NULL if @file_priv is non-NULL.
1238  * @vclips: Array of clip rects.
1239  * @num_clips: Number of clip rects in @vclips.
1240  *
1241  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1242  * interrupted.
1243  */
1244 int vmw_kms_readback(struct vmw_private *dev_priv,
1245 		     struct drm_file *file_priv,
1246 		     struct vmw_framebuffer *vfb,
1247 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1248 		     struct drm_vmw_rect *vclips,
1249 		     uint32_t num_clips)
1250 {
1251 	switch (dev_priv->active_display_unit) {
1252 	case vmw_du_screen_object:
1253 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1254 					    user_fence_rep, vclips, num_clips,
1255 					    NULL);
1256 	case vmw_du_screen_target:
1257 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1258 					     user_fence_rep, NULL, vclips, num_clips,
1259 					     1, NULL);
1260 	default:
1261 		WARN_ONCE(true,
1262 			  "Readback called with invalid display system.\n");
1263 	}
1264 
1265 	return -ENOSYS;
1266 }
1267 
1268 
1269 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1270 	.destroy = vmw_framebuffer_surface_destroy,
1271 	.dirty = drm_atomic_helper_dirtyfb,
1272 };
1273 
1274 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1275 					   struct vmw_surface *surface,
1276 					   struct vmw_framebuffer **out,
1277 					   const struct drm_mode_fb_cmd2
1278 					   *mode_cmd,
1279 					   bool is_bo_proxy)
1280 
1281 {
1282 	struct drm_device *dev = &dev_priv->drm;
1283 	struct vmw_framebuffer_surface *vfbs;
1284 	enum SVGA3dSurfaceFormat format;
1285 	int ret;
1286 
1287 	/* 3D is only supported on HWv8 and newer hosts */
1288 	if (dev_priv->active_display_unit == vmw_du_legacy)
1289 		return -ENOSYS;
1290 
1291 	/*
1292 	 * Sanity checks.
1293 	 */
1294 
1295 	if (!drm_any_plane_has_format(&dev_priv->drm,
1296 				      mode_cmd->pixel_format,
1297 				      mode_cmd->modifier[0])) {
1298 		drm_dbg(&dev_priv->drm,
1299 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1300 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1301 		return -EINVAL;
1302 	}
1303 
1304 	/* Surface must be marked as a scanout. */
1305 	if (unlikely(!surface->metadata.scanout))
1306 		return -EINVAL;
1307 
1308 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1309 		     surface->metadata.num_sizes != 1 ||
1310 		     surface->metadata.base_size.width < mode_cmd->width ||
1311 		     surface->metadata.base_size.height < mode_cmd->height ||
1312 		     surface->metadata.base_size.depth != 1)) {
1313 		DRM_ERROR("Incompatible surface dimensions "
1314 			  "for requested mode.\n");
1315 		return -EINVAL;
1316 	}
1317 
1318 	switch (mode_cmd->pixel_format) {
1319 	case DRM_FORMAT_ARGB8888:
1320 		format = SVGA3D_A8R8G8B8;
1321 		break;
1322 	case DRM_FORMAT_XRGB8888:
1323 		format = SVGA3D_X8R8G8B8;
1324 		break;
1325 	case DRM_FORMAT_RGB565:
1326 		format = SVGA3D_R5G6B5;
1327 		break;
1328 	case DRM_FORMAT_XRGB1555:
1329 		format = SVGA3D_A1R5G5B5;
1330 		break;
1331 	default:
1332 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1333 			  &mode_cmd->pixel_format);
1334 		return -EINVAL;
1335 	}
1336 
1337 	/*
1338 	 * For DX, surface format validation is done when surface->scanout
1339 	 * is set.
1340 	 */
1341 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1342 		DRM_ERROR("Invalid surface format for requested mode.\n");
1343 		return -EINVAL;
1344 	}
1345 
1346 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1347 	if (!vfbs) {
1348 		ret = -ENOMEM;
1349 		goto out_err1;
1350 	}
1351 
1352 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1353 	vfbs->surface = vmw_surface_reference(surface);
1354 	vfbs->is_bo_proxy = is_bo_proxy;
1355 
1356 	*out = &vfbs->base;
1357 
1358 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1359 				   &vmw_framebuffer_surface_funcs);
1360 	if (ret)
1361 		goto out_err2;
1362 
1363 	return 0;
1364 
1365 out_err2:
1366 	vmw_surface_unreference(&surface);
1367 	kfree(vfbs);
1368 out_err1:
1369 	return ret;
1370 }
1371 
1372 /*
1373  * Buffer-object framebuffer code
1374  */
1375 
1376 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1377 					    struct drm_file *file_priv,
1378 					    unsigned int *handle)
1379 {
1380 	struct vmw_framebuffer_bo *vfbd =
1381 			vmw_framebuffer_to_vfbd(fb);
1382 
1383 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1384 }
1385 
1386 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1387 {
1388 	struct vmw_framebuffer_bo *vfbd =
1389 		vmw_framebuffer_to_vfbd(framebuffer);
1390 
1391 	drm_framebuffer_cleanup(framebuffer);
1392 	vmw_bo_unreference(&vfbd->buffer);
1393 
1394 	kfree(vfbd);
1395 }
1396 
1397 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1398 	.create_handle = vmw_framebuffer_bo_create_handle,
1399 	.destroy = vmw_framebuffer_bo_destroy,
1400 	.dirty = drm_atomic_helper_dirtyfb,
1401 };
1402 
1403 /**
1404  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1405  *
1406  * @dev: DRM device
1407  * @mode_cmd: parameters for the new surface
1408  * @bo_mob: MOB backing the buffer object
1409  * @srf_out: newly created surface
1410  *
1411  * When the content FB is a buffer object, we create a surface as a proxy to the
1412  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1413  * This is a more efficient approach.
1414  *
1415  * RETURNS:
1416  * 0 on success, error code otherwise
1417  */
1418 static int vmw_create_bo_proxy(struct drm_device *dev,
1419 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1420 			       struct vmw_bo *bo_mob,
1421 			       struct vmw_surface **srf_out)
1422 {
1423 	struct vmw_surface_metadata metadata = {0};
1424 	uint32_t format;
1425 	struct vmw_resource *res;
1426 	unsigned int bytes_pp;
1427 	int ret;
1428 
1429 	switch (mode_cmd->pixel_format) {
1430 	case DRM_FORMAT_ARGB8888:
1431 	case DRM_FORMAT_XRGB8888:
1432 		format = SVGA3D_X8R8G8B8;
1433 		bytes_pp = 4;
1434 		break;
1435 
1436 	case DRM_FORMAT_RGB565:
1437 	case DRM_FORMAT_XRGB1555:
1438 		format = SVGA3D_R5G6B5;
1439 		bytes_pp = 2;
1440 		break;
1441 
1442 	case 8:
1443 		format = SVGA3D_P8;
1444 		bytes_pp = 1;
1445 		break;
1446 
1447 	default:
1448 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1449 			  &mode_cmd->pixel_format);
1450 		return -EINVAL;
1451 	}
1452 
1453 	metadata.format = format;
1454 	metadata.mip_levels[0] = 1;
1455 	metadata.num_sizes = 1;
1456 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1457 	metadata.base_size.height =  mode_cmd->height;
1458 	metadata.base_size.depth = 1;
1459 	metadata.scanout = true;
1460 
1461 	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1462 	if (ret) {
1463 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1464 		return ret;
1465 	}
1466 
1467 	res = &(*srf_out)->res;
1468 
1469 	/* Reserve and switch the backing mob. */
1470 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1471 	(void) vmw_resource_reserve(res, false, true);
1472 	vmw_user_bo_unref(&res->guest_memory_bo);
1473 	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
1474 	res->guest_memory_offset = 0;
1475 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1476 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1477 
1478 	return 0;
1479 }
1480 
1481 
1482 
1483 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1484 				      struct vmw_bo *bo,
1485 				      struct vmw_framebuffer **out,
1486 				      const struct drm_mode_fb_cmd2
1487 				      *mode_cmd)
1488 
1489 {
1490 	struct drm_device *dev = &dev_priv->drm;
1491 	struct vmw_framebuffer_bo *vfbd;
1492 	unsigned int requested_size;
1493 	int ret;
1494 
1495 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1496 	if (unlikely(requested_size > bo->tbo.base.size)) {
1497 		DRM_ERROR("Screen buffer object size is too small "
1498 			  "for requested mode.\n");
1499 		return -EINVAL;
1500 	}
1501 
1502 	if (!drm_any_plane_has_format(&dev_priv->drm,
1503 				      mode_cmd->pixel_format,
1504 				      mode_cmd->modifier[0])) {
1505 		drm_dbg(&dev_priv->drm,
1506 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1507 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1508 		return -EINVAL;
1509 	}
1510 
1511 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1512 	if (!vfbd) {
1513 		ret = -ENOMEM;
1514 		goto out_err1;
1515 	}
1516 
1517 	vfbd->base.base.obj[0] = &bo->tbo.base;
1518 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1519 	vfbd->base.bo = true;
1520 	vfbd->buffer = vmw_bo_reference(bo);
1521 	*out = &vfbd->base;
1522 
1523 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1524 				   &vmw_framebuffer_bo_funcs);
1525 	if (ret)
1526 		goto out_err2;
1527 
1528 	return 0;
1529 
1530 out_err2:
1531 	vmw_bo_unreference(&bo);
1532 	kfree(vfbd);
1533 out_err1:
1534 	return ret;
1535 }
1536 
1537 
1538 /**
1539  * vmw_kms_srf_ok - check if a surface can be created
1540  *
1541  * @dev_priv: Pointer to device private struct.
1542  * @width: requested width
1543  * @height: requested height
1544  *
1545  * Surfaces must not exceed the maximum texture size
1546  */
1547 static bool
1548 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1549 {
1550 	if (width  > dev_priv->texture_max_width ||
1551 	    height > dev_priv->texture_max_height)
1552 		return false;
1553 
1554 	return true;
1555 }
1556 
1557 /**
1558  * vmw_kms_new_framebuffer - Create a new framebuffer.
1559  *
1560  * @dev_priv: Pointer to device private struct.
1561  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1562  * Either @bo or @surface must be NULL.
1563  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1564  * Either @bo or @surface must be NULL.
1565  * @only_2d: No presents will occur to this buffer object based framebuffer.
1566  * This helps the code to do some important optimizations.
1567  * @mode_cmd: Frame-buffer metadata.
1568  */
1569 struct vmw_framebuffer *
1570 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1571 			struct vmw_bo *bo,
1572 			struct vmw_surface *surface,
1573 			bool only_2d,
1574 			const struct drm_mode_fb_cmd2 *mode_cmd)
1575 {
1576 	struct vmw_framebuffer *vfb = NULL;
1577 	bool is_bo_proxy = false;
1578 	int ret;
1579 
1580 	/*
1581 	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1582 	 * therefore, wrap the buffer object in a surface so we can use the
1583 	 * SurfaceCopy command.
1584 	 */
1585 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1586 	    bo && only_2d &&
1587 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1588 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1589 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1590 					  bo, &surface);
1591 		if (ret)
1592 			return ERR_PTR(ret);
1593 
1594 		is_bo_proxy = true;
1595 	}
1596 
1597 	/* Create the new framebuffer depending on what we have */
1598 	if (surface) {
1599 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1600 						      mode_cmd,
1601 						      is_bo_proxy);
1602 		/*
1603 		 * vmw_create_bo_proxy() adds a reference that is no longer
1604 		 * needed
1605 		 */
1606 		if (is_bo_proxy)
1607 			vmw_surface_unreference(&surface);
1608 	} else if (bo) {
1609 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1610 						 mode_cmd);
1611 	} else {
1612 		BUG();
1613 	}
1614 
1615 	if (ret)
1616 		return ERR_PTR(ret);
1617 
1618 	return vfb;
1619 }
1620 
1621 /*
1622  * Generic Kernel modesetting functions
1623  */
1624 
1625 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1626 						 struct drm_file *file_priv,
1627 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1628 {
1629 	struct vmw_private *dev_priv = vmw_priv(dev);
1630 	struct vmw_framebuffer *vfb = NULL;
1631 	struct vmw_surface *surface = NULL;
1632 	struct vmw_bo *bo = NULL;
1633 	int ret;
1634 
1635 	/* returns either a bo or surface */
1636 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1637 				     mode_cmd->handles[0],
1638 				     &surface, &bo);
1639 	if (ret) {
1640 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1641 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1642 		goto err_out;
1643 	}
1644 
1645 
1646 	if (!bo &&
1647 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1648 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1649 			dev_priv->texture_max_width,
1650 			dev_priv->texture_max_height);
1651 		goto err_out;
1652 	}
1653 
1654 
1655 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1656 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1657 				      mode_cmd);
1658 	if (IS_ERR(vfb)) {
1659 		ret = PTR_ERR(vfb);
1660 		goto err_out;
1661 	}
1662 
1663 err_out:
1664 	/* vmw_user_lookup_handle takes one ref; so does new_fb */
1665 	if (bo)
1666 		vmw_user_bo_unref(&bo);
1667 	if (surface)
1668 		vmw_surface_unreference(&surface);
1669 
1670 	if (ret) {
1671 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1672 		return ERR_PTR(ret);
1673 	}
1674 
1675 	return &vfb->base;
1676 }
1677 
1678 /**
1679  * vmw_kms_check_display_memory - Validates display memory required for a
1680  * topology
1681  * @dev: DRM device
1682  * @num_rects: number of drm_rect in rects
1683  * @rects: array of drm_rect representing the topology to validate indexed by
1684  * crtc index.
1685  *
1686  * Returns:
1687  * 0 on success, otherwise a negative error code
1688  */
1689 static int vmw_kms_check_display_memory(struct drm_device *dev,
1690 					uint32_t num_rects,
1691 					struct drm_rect *rects)
1692 {
1693 	struct vmw_private *dev_priv = vmw_priv(dev);
1694 	struct drm_rect bounding_box = {0};
1695 	u64 total_pixels = 0, pixel_mem, bb_mem;
1696 	int i;
1697 
1698 	for (i = 0; i < num_rects; i++) {
1699 		/*
1700 		 * For STDU only individual screen (screen target) is limited by
1701 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1702 		 */
1703 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1704 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1705 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1706 			VMW_DEBUG_KMS("Screen size not supported.\n");
1707 			return -EINVAL;
1708 		}
1709 
1710 		/* Bounding box upper left is at (0,0). */
1711 		if (rects[i].x2 > bounding_box.x2)
1712 			bounding_box.x2 = rects[i].x2;
1713 
1714 		if (rects[i].y2 > bounding_box.y2)
1715 			bounding_box.y2 = rects[i].y2;
1716 
1717 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1718 			(u64) drm_rect_height(&rects[i]);
1719 	}
1720 
1721 	/* Virtual svga device primary limits are always in 32-bpp. */
1722 	pixel_mem = total_pixels * 4;
1723 
1724 	/*
1725 	 * For HV10 and below prim_bb_mem is the vram size. When
1726 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size
1727 	 * is the limit on the primary bounding box.
1728 	 */
1729 	if (pixel_mem > dev_priv->max_primary_mem) {
1730 		VMW_DEBUG_KMS("Combined output size too large.\n");
1731 		return -EINVAL;
1732 	}
1733 
1734 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1735 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1736 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1737 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1738 
1739 		if (bb_mem > dev_priv->max_primary_mem) {
1740 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1741 			return -EINVAL;
1742 		}
1743 	}
1744 
1745 	return 0;
1746 }
1747 
1748 /**
1749  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1750  * crtc mutex
1751  * @state: The atomic state pointer containing the new atomic state
1752  * @crtc: The crtc
1753  *
1754  * This function returns the new crtc state if it's part of the state update.
1755  * Otherwise returns the current crtc state. It also makes sure that the
1756  * crtc mutex is locked.
1757  *
1758  * Returns: A valid crtc state pointer or NULL. It may also return an
1759  * error pointer, in particular -EDEADLK if locking needs to be rerun.
1760  */
1761 static struct drm_crtc_state *
1762 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1763 {
1764 	struct drm_crtc_state *crtc_state;
1765 
1766 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1767 	if (crtc_state) {
1768 		lockdep_assert_held(&crtc->mutex.mutex.base);
1769 	} else {
1770 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1771 
1772 		if (ret != 0 && ret != -EALREADY)
1773 			return ERR_PTR(ret);
1774 
1775 		crtc_state = crtc->state;
1776 	}
1777 
1778 	return crtc_state;
1779 }
1780 
1781 /**
1782  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1783  * from the same fb after the new state is committed.
1784  * @dev: The drm_device.
1785  * @state: The new state to be checked.
1786  *
1787  * Returns:
1788  *   Zero on success,
1789  *   -EINVAL on invalid state,
1790  *   -EDEADLK if modeset locking needs to be rerun.
1791  */
1792 static int vmw_kms_check_implicit(struct drm_device *dev,
1793 				  struct drm_atomic_state *state)
1794 {
1795 	struct drm_framebuffer *implicit_fb = NULL;
1796 	struct drm_crtc *crtc;
1797 	struct drm_crtc_state *crtc_state;
1798 	struct drm_plane_state *plane_state;
1799 
1800 	drm_for_each_crtc(crtc, dev) {
1801 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1802 
1803 		if (!du->is_implicit)
1804 			continue;
1805 
1806 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1807 		if (IS_ERR(crtc_state))
1808 			return PTR_ERR(crtc_state);
1809 
1810 		if (!crtc_state || !crtc_state->enable)
1811 			continue;
1812 
1813 		/*
1814 		 * Can't move primary planes across crtcs, so this is OK.
1815 		 * It also means we don't need to take the plane mutex.
1816 		 */
1817 		plane_state = du->primary.state;
1818 		if (plane_state->crtc != crtc)
1819 			continue;
1820 
1821 		if (!implicit_fb)
1822 			implicit_fb = plane_state->fb;
1823 		else if (implicit_fb != plane_state->fb)
1824 			return -EINVAL;
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 /**
1831  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1832  * @dev: DRM device
1833  * @state: the driver state object
1834  *
1835  * Returns:
1836  * 0 on success, otherwise a negative error code
1837  */
1838 static int vmw_kms_check_topology(struct drm_device *dev,
1839 				  struct drm_atomic_state *state)
1840 {
1841 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1842 	struct drm_rect *rects;
1843 	struct drm_crtc *crtc;
1844 	uint32_t i;
1845 	int ret = 0;
1846 
1847 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1848 			GFP_KERNEL);
1849 	if (!rects)
1850 		return -ENOMEM;
1851 
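	/* Collect the screen rect of each crtc; disabled crtcs get an empty rect. */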
1852 	drm_for_each_crtc(crtc, dev) {
1853 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1854 		struct drm_crtc_state *crtc_state;
1855 
1856 		i = drm_crtc_index(crtc);
1857 
1858 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1859 		if (IS_ERR(crtc_state)) {
1860 			ret = PTR_ERR(crtc_state);
1861 			goto clean;
1862 		}
1863 
1864 		if (!crtc_state)
1865 			continue;
1866 
1867 		if (crtc_state->enable) {
1868 			rects[i].x1 = du->gui_x;
1869 			rects[i].y1 = du->gui_y;
1870 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1871 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1872 		} else {
1873 			rects[i].x1 = 0;
1874 			rects[i].y1 = 0;
1875 			rects[i].x2 = 0;
1876 			rects[i].y2 = 0;
1877 		}
1878 	}
1879 
1880 	/* Determine change to topology due to new atomic state */
1881 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1882 				      new_crtc_state, i) {
1883 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1884 		struct drm_connector *connector;
1885 		struct drm_connector_state *conn_state;
1886 		struct vmw_connector_state *vmw_conn_state;
1887 
1888 		if (!du->pref_active && new_crtc_state->enable) {
1889 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1890 			ret = -EINVAL;
1891 			goto clean;
1892 		}
1893 
1894 		/*
1895 		 * For vmwgfx each crtc has only one connector attached and it
1896 		 * is not changed, so we don't really need to check
1897 		 * crtc->connector_mask and iterate over it.
1898 		 */
1899 		connector = &du->connector;
1900 		conn_state = drm_atomic_get_connector_state(state, connector);
1901 		if (IS_ERR(conn_state)) {
1902 			ret = PTR_ERR(conn_state);
1903 			goto clean;
1904 		}
1905 
1906 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1907 		vmw_conn_state->gui_x = du->gui_x;
1908 		vmw_conn_state->gui_y = du->gui_y;
1909 	}
1910 
1911 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1912 					   rects);
1913 
1914 clean:
1915 	kfree(rects);
1916 	return ret;
1917 }
1918 
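/*
 * Illustrative example only: for two enabled units placed side by side,
 * du0 at gui_x/y = (0, 0) with a 1024x768 mode and du1 at (1024, 0) with an
 * 800x600 mode, the rects handed to vmw_kms_check_display_memory() are:
 *
 *	rects[0] = { .x1 = 0,    .y1 = 0, .x2 = 1024, .y2 = 768 };
 *	rects[1] = { .x1 = 1024, .y1 = 0, .x2 = 1824, .y2 = 600 };
 *
 * Disabled crtcs contribute an empty 0x0 rect.
 */
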
1919 /**
1920  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1921  *
1922  * @dev: DRM device
1923  * @state: the driver state object
1924  *
1925  * This is a wrapper that runs the core atomic checks via
1926  * drm_atomic_helper_check() and then the vmwgfx-specific checks: implicit
1927  * placement validation and, when a modeset is required, topology validation.
1928  *
1929  * Returns:
1930  * Zero for success or -errno
1931  */
1932 static int
1933 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1934 			     struct drm_atomic_state *state)
1935 {
1936 	struct drm_crtc *crtc;
1937 	struct drm_crtc_state *crtc_state;
1938 	bool need_modeset = false;
1939 	int i, ret;
1940 
1941 	ret = drm_atomic_helper_check(dev, state);
1942 	if (ret)
1943 		return ret;
1944 
1945 	ret = vmw_kms_check_implicit(dev, state);
1946 	if (ret) {
1947 		VMW_DEBUG_KMS("Invalid implicit state\n");
1948 		return ret;
1949 	}
1950 
1951 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1952 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1953 			need_modeset = true;
1954 	}
1955 
1956 	if (need_modeset)
1957 		return vmw_kms_check_topology(dev, state);
1958 
1959 	return ret;
1960 }
1961 
1962 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1963 	.fb_create = vmw_kms_fb_create,
1964 	.atomic_check = vmw_kms_atomic_check_modeset,
1965 	.atomic_commit = drm_atomic_helper_commit,
1966 };
1967 
1968 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1969 				   struct drm_file *file_priv,
1970 				   struct vmw_framebuffer *vfb,
1971 				   struct vmw_surface *surface,
1972 				   uint32_t sid,
1973 				   int32_t destX, int32_t destY,
1974 				   struct drm_vmw_rect *clips,
1975 				   uint32_t num_clips)
1976 {
1977 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1978 					    &surface->res, destX, destY,
1979 					    num_clips, 1, NULL, NULL);
1980 }
1981 
1982 
1983 int vmw_kms_present(struct vmw_private *dev_priv,
1984 		    struct drm_file *file_priv,
1985 		    struct vmw_framebuffer *vfb,
1986 		    struct vmw_surface *surface,
1987 		    uint32_t sid,
1988 		    int32_t destX, int32_t destY,
1989 		    struct drm_vmw_rect *clips,
1990 		    uint32_t num_clips)
1991 {
1992 	int ret;
1993 
1994 	switch (dev_priv->active_display_unit) {
1995 	case vmw_du_screen_target:
1996 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1997 						 &surface->res, destX, destY,
1998 						 num_clips, 1, NULL, NULL);
1999 		break;
2000 	case vmw_du_screen_object:
2001 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2002 					      sid, destX, destY, clips,
2003 					      num_clips);
2004 		break;
2005 	default:
2006 		WARN_ONCE(true,
2007 			  "Present called with invalid display system.\n");
2008 		ret = -ENOSYS;
2009 		break;
2010 	}
2011 	if (ret)
2012 		return ret;
2013 
2014 	vmw_cmd_flush(dev_priv, false);
2015 
2016 	return 0;
2017 }
2018 
2019 static void
2020 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2021 {
2022 	if (dev_priv->hotplug_mode_update_property)
2023 		return;
2024 
2025 	dev_priv->hotplug_mode_update_property =
2026 		drm_property_create_range(&dev_priv->drm,
2027 					  DRM_MODE_PROP_IMMUTABLE,
2028 					  "hotplug_mode_update", 0, 1);
2029 }
2030 
2031 int vmw_kms_init(struct vmw_private *dev_priv)
2032 {
2033 	struct drm_device *dev = &dev_priv->drm;
2034 	int ret;
2035 	static const char *display_unit_names[] = {
2036 		"Invalid",
2037 		"Legacy",
2038 		"Screen Object",
2039 		"Screen Target",
2040 		"Invalid (max)"
2041 	};
2042 
2043 	drm_mode_config_init(dev);
2044 	dev->mode_config.funcs = &vmw_kms_funcs;
2045 	dev->mode_config.min_width = 1;
2046 	dev->mode_config.min_height = 1;
2047 	dev->mode_config.max_width = dev_priv->texture_max_width;
2048 	dev->mode_config.max_height = dev_priv->texture_max_height;
2049 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2050 
2051 	drm_mode_create_suggested_offset_properties(dev);
2052 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2053 
2054 	ret = vmw_kms_stdu_init_display(dev_priv);
2055 	if (ret) {
2056 		ret = vmw_kms_sou_init_display(dev_priv);
2057 		if (ret) /* Fallback */
2058 			ret = vmw_kms_ldu_init_display(dev_priv);
2059 	}
2060 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2061 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2062 		 display_unit_names[dev_priv->active_display_unit]);
2063 
2064 	return ret;
2065 }
2066 
2067 int vmw_kms_close(struct vmw_private *dev_priv)
2068 {
2069 	int ret = 0;
2070 
2071 	/*
2072 	 * The docs say we should take the lock before calling this function,
2073 	 * but since it destroys encoders and our destructor calls
2074 	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2075 	 */
2076 	drm_mode_config_cleanup(&dev_priv->drm);
2077 	if (dev_priv->active_display_unit == vmw_du_legacy)
2078 		ret = vmw_kms_ldu_close_display(dev_priv);
2079 
2080 	return ret;
2081 }
2082 
2083 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2084 				struct drm_file *file_priv)
2085 {
2086 	struct drm_vmw_cursor_bypass_arg *arg = data;
2087 	struct vmw_display_unit *du;
2088 	struct drm_crtc *crtc;
2089 	int ret = 0;
2090 
2091 	mutex_lock(&dev->mode_config.mutex);
2092 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2093 
2094 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2095 			du = vmw_crtc_to_du(crtc);
2096 			du->hotspot_x = arg->xhot;
2097 			du->hotspot_y = arg->yhot;
2098 		}
2099 
2100 		mutex_unlock(&dev->mode_config.mutex);
2101 		return 0;
2102 	}
2103 
2104 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2105 	if (!crtc) {
2106 		ret = -ENOENT;
2107 		goto out;
2108 	}
2109 
2110 	du = vmw_crtc_to_du(crtc);
2111 
2112 	du->hotspot_x = arg->xhot;
2113 	du->hotspot_y = arg->yhot;
2114 
2115 out:
2116 	mutex_unlock(&dev->mode_config.mutex);
2117 
2118 	return ret;
2119 }
2120 
2121 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2122 			unsigned width, unsigned height, unsigned pitch,
2123 			unsigned bpp, unsigned depth)
2124 {
2125 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2126 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2127 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2128 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2129 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2130 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2131 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2132 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2133 
2134 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2135 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2136 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2137 		return -EINVAL;
2138 	}
2139 
2140 	return 0;
2141 }
2142 
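/*
 * Illustrative example only (values are assumptions, not taken from a real
 * host): for a 1024x768 mode at 32 bpp the pitch is 1024 * 4 = 4096 bytes,
 * so a caller would program the registers roughly as
 *
 *	vmw_kms_write_svga(vmw_priv, 1024, 768, 4096, 32, depth);
 *
 * where @depth must match what the host reports in SVGA_REG_DEPTH or the
 * call fails with -EINVAL.
 */
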
2143 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2144 				uint32_t pitch,
2145 				uint32_t height)
2146 {
2147 	return ((u64) pitch * (u64) height) < (u64)
2148 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2149 		 dev_priv->max_primary_mem : dev_priv->vram_size);
2150 }
2151 
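/*
 * Illustrative example only: a 1920x1080 mode at 4 bytes per pixel gives a
 * pitch of 1920 * 4 = 7680 bytes and needs 7680 * 1080 = 8294400 bytes
 * (~7.9 MiB); vmw_kms_validate_mode_vram() accepts the mode only if that
 * product is below max_primary_mem (screen targets) or vram_size (other
 * display units).
 */
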
2152 /**
2153  * vmw_du_update_layout - Update the display unit with topology from resolution
2154  * plugin and generate DRM uevent
2155  * @dev_priv: device private
2156  * @num_rects: number of drm_rect in @rects
2157  * @rects: topology to update
2158  */
2159 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2160 				unsigned int num_rects, struct drm_rect *rects)
2161 {
2162 	struct drm_device *dev = &dev_priv->drm;
2163 	struct vmw_display_unit *du;
2164 	struct drm_connector *con;
2165 	struct drm_connector_list_iter conn_iter;
2166 	struct drm_modeset_acquire_ctx ctx;
2167 	struct drm_crtc *crtc;
2168 	int ret;
2169 
2170 	/* Currently gui_x/y is protected with the crtc mutex */
2171 	mutex_lock(&dev->mode_config.mutex);
2172 	drm_modeset_acquire_init(&ctx, 0);
2173 retry:
2174 	drm_for_each_crtc(crtc, dev) {
2175 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2176 		if (ret < 0) {
2177 			if (ret == -EDEADLK) {
2178 				drm_modeset_backoff(&ctx);
2179 				goto retry;
2180 			}
2181 			goto out_fini;
2182 		}
2183 	}
2184 
2185 	drm_connector_list_iter_begin(dev, &conn_iter);
2186 	drm_for_each_connector_iter(con, &conn_iter) {
2187 		du = vmw_connector_to_du(con);
2188 		if (num_rects > du->unit) {
2189 			du->pref_width = drm_rect_width(&rects[du->unit]);
2190 			du->pref_height = drm_rect_height(&rects[du->unit]);
2191 			du->pref_active = true;
2192 			du->gui_x = rects[du->unit].x1;
2193 			du->gui_y = rects[du->unit].y1;
2194 		} else {
2195 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2196 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2197 			du->pref_active = false;
2198 			du->gui_x = 0;
2199 			du->gui_y = 0;
2200 		}
2201 	}
2202 	drm_connector_list_iter_end(&conn_iter);
2203 
2204 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2205 		du = vmw_connector_to_du(con);
2206 		if (num_rects > du->unit) {
2207 			drm_object_property_set_value
2208 			  (&con->base, dev->mode_config.suggested_x_property,
2209 			   du->gui_x);
2210 			drm_object_property_set_value
2211 			  (&con->base, dev->mode_config.suggested_y_property,
2212 			   du->gui_y);
2213 		} else {
2214 			drm_object_property_set_value
2215 			  (&con->base, dev->mode_config.suggested_x_property,
2216 			   0);
2217 			drm_object_property_set_value
2218 			  (&con->base, dev->mode_config.suggested_y_property,
2219 			   0);
2220 		}
2221 		con->status = vmw_du_connector_detect(con, true);
2222 	}
2223 out_fini:
2224 	drm_modeset_drop_locks(&ctx);
2225 	drm_modeset_acquire_fini(&ctx);
2226 	mutex_unlock(&dev->mode_config.mutex);
2227 
2228 	drm_sysfs_hotplug_event(dev);
2229 
2230 	return 0;
2231 }
2232 
2233 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2234 			  u16 *r, u16 *g, u16 *b,
2235 			  uint32_t size,
2236 			  struct drm_modeset_acquire_ctx *ctx)
2237 {
2238 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2239 	int i;
2240 
2241 	for (i = 0; i < size; i++) {
2242 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2243 			  r[i], g[i], b[i]);
2244 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2245 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2246 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2247 	}
2248 
2249 	return 0;
2250 }
2251 
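/*
 * Illustrative example only: the palette registers take 8-bit components,
 * three consecutive registers per entry, so a 16-bit gamma value such as
 * 0xffff is written as 0xffff >> 8 = 0xff. Entry i lands at:
 *
 *	SVGA_PALETTE_BASE + i * 3 + 0	red
 *	SVGA_PALETTE_BASE + i * 3 + 1	green
 *	SVGA_PALETTE_BASE + i * 3 + 2	blue
 */
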
2252 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2253 {
2254 	return 0;
2255 }
2256 
2257 enum drm_connector_status
2258 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2259 {
2260 	uint32_t num_displays;
2261 	struct drm_device *dev = connector->dev;
2262 	struct vmw_private *dev_priv = vmw_priv(dev);
2263 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2264 
2265 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2266 
2267 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2268 		 du->pref_active) ?
2269 		connector_status_connected : connector_status_disconnected);
2270 }
2271 
2272 /**
2273  * vmw_guess_mode_timing - Provide fake timings for a
2274  * 60Hz vrefresh mode.
2275  *
2276  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2277  * members filled in.
2278  */
2279 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2280 {
2281 	mode->hsync_start = mode->hdisplay + 50;
2282 	mode->hsync_end = mode->hsync_start + 50;
2283 	mode->htotal = mode->hsync_end + 50;
2284 
2285 	mode->vsync_start = mode->vdisplay + 50;
2286 	mode->vsync_end = mode->vsync_start + 50;
2287 	mode->vtotal = mode->vsync_end + 50;
2288 
2289 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2290 }
2291 
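/*
 * Illustrative example only: for a 1024x768 mode the guessed timings are
 * htotal = 1024 + 3 * 50 = 1174 and vtotal = 768 + 3 * 50 = 918, giving
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz (integer arithmetic) and thus a
 * refresh rate of 64662000 / (1174 * 918) ~= 60 Hz, which is the intent of
 * this helper.
 */
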
2292 
2293 /**
2294  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2295  * @dev: drm device for the ioctl
2296  * @data: data pointer for the ioctl
2297  * @file_priv: drm file for the ioctl call
2298  *
2299  * Update the preferred topology of the display units as per the ioctl
2300  * request. The topology is expressed as an array of drm_vmw_rect.
2301  * e.g.
2302  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2303  *
2304  * NOTE:
2305  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2306  * Besides the device limit on topology, x + w and y + h (lower right) cannot
2307  * be greater than INT_MAX. Topologies beyond these limits return an error.
2308  *
2309  * Returns:
2310  * Zero on success, negative errno on failure.
2311  */
2312 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2313 				struct drm_file *file_priv)
2314 {
2315 	struct vmw_private *dev_priv = vmw_priv(dev);
2316 	struct drm_mode_config *mode_config = &dev->mode_config;
2317 	struct drm_vmw_update_layout_arg *arg =
2318 		(struct drm_vmw_update_layout_arg *)data;
2319 	void __user *user_rects;
2320 	struct drm_vmw_rect *rects;
2321 	struct drm_rect *drm_rects;
2322 	unsigned rects_size;
2323 	int ret, i;
2324 
2325 	if (!arg->num_outputs) {
2326 		struct drm_rect def_rect = {0, 0,
2327 					    VMWGFX_MIN_INITIAL_WIDTH,
2328 					    VMWGFX_MIN_INITIAL_HEIGHT};
2329 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2330 		return 0;
2331 	}
2332 
2333 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2334 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2335 			GFP_KERNEL);
2336 	if (unlikely(!rects))
2337 		return -ENOMEM;
2338 
2339 	user_rects = (void __user *)(unsigned long)arg->rects;
2340 	ret = copy_from_user(rects, user_rects, rects_size);
2341 	if (unlikely(ret != 0)) {
2342 		DRM_ERROR("Failed to get rects.\n");
2343 		ret = -EFAULT;
2344 		goto out_free;
2345 	}
2346 
2347 	drm_rects = (struct drm_rect *)rects;
2348 
2349 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2350 	for (i = 0; i < arg->num_outputs; i++) {
2351 		struct drm_vmw_rect curr_rect;
2352 
2353 		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2354 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2355 		    (rects[i].y + rects[i].h > INT_MAX)) {
2356 			ret = -ERANGE;
2357 			goto out_free;
2358 		}
2359 
2360 		curr_rect = rects[i];
2361 		drm_rects[i].x1 = curr_rect.x;
2362 		drm_rects[i].y1 = curr_rect.y;
2363 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2364 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2365 
2366 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2367 			      drm_rects[i].x1, drm_rects[i].y1,
2368 			      drm_rects[i].x2, drm_rects[i].y2);
2369 
2370 		/*
2371 		 * Currently this check limits the topology to within
2372 		 * mode_config->max_width/max_height (which actually is the max
2373 		 * texture size supported by the virtual device). This limit is
2374 		 * here to address window managers that create a big framebuffer
2375 		 * for the whole topology.
2376 		 */
2377 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2378 		    drm_rects[i].x2 > mode_config->max_width ||
2379 		    drm_rects[i].y2 > mode_config->max_height) {
2380 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2381 				      drm_rects[i].x1, drm_rects[i].y1,
2382 				      drm_rects[i].x2, drm_rects[i].y2);
2383 			ret = -EINVAL;
2384 			goto out_free;
2385 		}
2386 	}
2387 
2388 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2389 
2390 	if (ret == 0)
2391 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2392 
2393 out_free:
2394 	kfree(rects);
2395 	return ret;
2396 }
2397 
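/*
 * Illustrative example only, using the rects from the kernel-doc above: the
 * drm_vmw_rect entries {x, y, w, h} are converted in place to drm_rect
 * {x1, y1, x2, y2}, e.g.
 *
 *	{ .x = 640, .y = 0, .w = 800, .h = 600 }
 *
 * becomes
 *
 *	{ .x1 = 640, .y1 = 0, .x2 = 1440, .y2 = 600 }
 *
 * before the memory and bounds checks are applied.
 */
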
2398 /**
2399  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2400  * on a set of cliprects and a set of display units.
2401  *
2402  * @dev_priv: Pointer to a device private structure.
2403  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2404  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2405  * Cliprects are given in framebuffer coordinates.
2406  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2407  * be NULL. Cliprects are given in source coordinates.
2408  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2409  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2410  * @num_clips: Number of cliprects in the @clips or @vclips array.
2411  * @increment: Integer with which to increment the clip counter when looping.
2412  * Used to skip a predetermined number of clip rects.
2413  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2414  */
2415 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2416 			 struct vmw_framebuffer *framebuffer,
2417 			 const struct drm_clip_rect *clips,
2418 			 const struct drm_vmw_rect *vclips,
2419 			 s32 dest_x, s32 dest_y,
2420 			 int num_clips,
2421 			 int increment,
2422 			 struct vmw_kms_dirty *dirty)
2423 {
2424 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2425 	struct drm_crtc *crtc;
2426 	u32 num_units = 0;
2427 	u32 i, k;
2428 
2429 	dirty->dev_priv = dev_priv;
2430 
2431 	/* If crtc is passed, no need to iterate over other display units */
2432 	if (dirty->crtc) {
2433 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2434 	} else {
2435 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2436 				    head) {
2437 			struct drm_plane *plane = crtc->primary;
2438 
2439 			if (plane->state->fb == &framebuffer->base)
2440 				units[num_units++] = vmw_crtc_to_du(crtc);
2441 		}
2442 	}
2443 
2444 	for (k = 0; k < num_units; k++) {
2445 		struct vmw_display_unit *unit = units[k];
2446 		s32 crtc_x = unit->crtc.x;
2447 		s32 crtc_y = unit->crtc.y;
2448 		s32 crtc_width = unit->crtc.mode.hdisplay;
2449 		s32 crtc_height = unit->crtc.mode.vdisplay;
2450 		const struct drm_clip_rect *clips_ptr = clips;
2451 		const struct drm_vmw_rect *vclips_ptr = vclips;
2452 
2453 		dirty->unit = unit;
2454 		if (dirty->fifo_reserve_size > 0) {
2455 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2456 						      dirty->fifo_reserve_size);
2457 			if (!dirty->cmd)
2458 				return -ENOMEM;
2459 
2460 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2461 		}
2462 		dirty->num_hits = 0;
2463 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2464 		       vclips_ptr += increment) {
2465 			s32 clip_left;
2466 			s32 clip_top;
2467 
2468 			/*
2469 			 * Select clip array type. Note that integer type
2470 			 * in @clips is unsigned short, whereas in @vclips
2471 			 * it's 32-bit.
2472 			 */
2473 			if (clips) {
2474 				dirty->fb_x = (s32) clips_ptr->x1;
2475 				dirty->fb_y = (s32) clips_ptr->y1;
2476 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2477 					crtc_x;
2478 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2479 					crtc_y;
2480 			} else {
2481 				dirty->fb_x = vclips_ptr->x;
2482 				dirty->fb_y = vclips_ptr->y;
2483 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2484 					dest_x - crtc_x;
2485 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2486 					dest_y - crtc_y;
2487 			}
2488 
2489 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2490 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2491 
2492 			/* Skip this clip if it's outside the crtc region */
2493 			if (dirty->unit_x1 >= crtc_width ||
2494 			    dirty->unit_y1 >= crtc_height ||
2495 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2496 				continue;
2497 
2498 			/* Clip right and bottom to crtc limits */
2499 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2500 					       crtc_width);
2501 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2502 					       crtc_height);
2503 
2504 			/* Clip left and top to crtc limits */
2505 			clip_left = min_t(s32, dirty->unit_x1, 0);
2506 			clip_top = min_t(s32, dirty->unit_y1, 0);
2507 			dirty->unit_x1 -= clip_left;
2508 			dirty->unit_y1 -= clip_top;
2509 			dirty->fb_x -= clip_left;
2510 			dirty->fb_y -= clip_top;
2511 
2512 			dirty->clip(dirty);
2513 		}
2514 
2515 		dirty->fifo_commit(dirty);
2516 	}
2517 
2518 	return 0;
2519 }
2520 
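/*
 * Illustrative example only: for a clip rect starting at fb coordinates
 * (fb_x, fb_y) the per-unit coordinates are derived as in the loop above,
 *
 *	unit_x1 = fb_x + dest_x - crtc_x;
 *	unit_y1 = fb_y + dest_y - crtc_y;
 *
 * so a clip at fb (100, 50) presented at dest (0, 0) on a unit whose crtc
 * origin is (1024, 0) ends up at unit coordinates (-924, 50) and is then
 * clipped against the crtc; clips entirely outside the crtc are skipped.
 */
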
2521 /**
2522  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2523  * cleanup and fencing
2524  * @dev_priv: Pointer to the device-private struct
2525  * @file_priv: Pointer identifying the client when user-space fencing is used
2526  * @ctx: Pointer to the validation context
2527  * @out_fence: If non-NULL, returned refcounted fence-pointer
2528  * @user_fence_rep: If non-NULL, pointer to user-space address area
2529  * in which to copy user-space fence info
2530  */
2531 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2532 				      struct drm_file *file_priv,
2533 				      struct vmw_validation_context *ctx,
2534 				      struct vmw_fence_obj **out_fence,
2535 				      struct drm_vmw_fence_rep __user *
2536 				      user_fence_rep)
2537 {
2538 	struct vmw_fence_obj *fence = NULL;
2539 	uint32_t handle = 0;
2540 	int ret = 0;
2541 
2542 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2543 	    out_fence)
2544 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2545 						 file_priv ? &handle : NULL);
2546 	vmw_validation_done(ctx, fence);
2547 	if (file_priv)
2548 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2549 					    ret, user_fence_rep, fence,
2550 					    handle, -1);
2551 	if (out_fence)
2552 		*out_fence = fence;
2553 	else
2554 		vmw_fence_obj_unreference(&fence);
2555 }
2556 
2557 /**
2558  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2559  * its backing MOB.
2560  *
2561  * @res: Pointer to the surface resource
2562  * @clips: Clip rects in framebuffer (surface) space.
2563  * @num_clips: Number of clips in @clips.
2564  * @increment: Integer with which to increment the clip counter when looping.
2565  * Used to skip a predetermined number of clip rects.
2566  *
2567  * This function makes sure the proxy surface is updated from its backing MOB
2568  * using the region given by @clips. The surface resource @res and its backing
2569  * MOB need to be reserved and validated on call.
2570  */
2571 int vmw_kms_update_proxy(struct vmw_resource *res,
2572 			 const struct drm_clip_rect *clips,
2573 			 unsigned num_clips,
2574 			 int increment)
2575 {
2576 	struct vmw_private *dev_priv = res->dev_priv;
2577 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2578 	struct {
2579 		SVGA3dCmdHeader header;
2580 		SVGA3dCmdUpdateGBImage body;
2581 	} *cmd;
2582 	SVGA3dBox *box;
2583 	size_t copy_size = 0;
2584 	int i;
2585 
2586 	if (!clips)
2587 		return 0;
2588 
2589 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2590 	if (!cmd)
2591 		return -ENOMEM;
2592 
2593 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2594 		box = &cmd->body.box;
2595 
2596 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2597 		cmd->header.size = sizeof(cmd->body);
2598 		cmd->body.image.sid = res->id;
2599 		cmd->body.image.face = 0;
2600 		cmd->body.image.mipmap = 0;
2601 
2602 		if (clips->x1 > size->width || clips->x2 > size->width ||
2603 		    clips->y1 > size->height || clips->y2 > size->height) {
2604 			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2605 			return -EINVAL;
2606 		}
2607 
2608 		box->x = clips->x1;
2609 		box->y = clips->y1;
2610 		box->z = 0;
2611 		box->w = clips->x2 - clips->x1;
2612 		box->h = clips->y2 - clips->y1;
2613 		box->d = 1;
2614 
2615 		copy_size += sizeof(*cmd);
2616 	}
2617 
2618 	vmw_cmd_commit(dev_priv, copy_size);
2619 
2620 	return 0;
2621 }
2622 
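/*
 * Illustrative example only: each clip rect is turned into a 2D SVGA3dBox
 * in the command body, e.g. a clip of (x1, y1, x2, y2) = (16, 32, 80, 96)
 * yields
 *
 *	box->x = 16, box->y = 32, box->z = 0,
 *	box->w = 64, box->h = 64, box->d = 1;
 *
 * with one SVGA_3D_CMD_UPDATE_GB_IMAGE command per clip.
 */
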
2623 /**
2624  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2625  * property.
2626  *
2627  * @dev_priv: Pointer to a device private struct.
2628  *
2629  * Sets up the implicit placement property unless it's already set up.
2630  */
2631 void
2632 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2633 {
2634 	if (dev_priv->implicit_placement_property)
2635 		return;
2636 
2637 	dev_priv->implicit_placement_property =
2638 		drm_property_create_range(&dev_priv->drm,
2639 					  DRM_MODE_PROP_IMMUTABLE,
2640 					  "implicit_placement", 0, 1);
2641 }
2642 
2643 /**
2644  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2645  *
2646  * @dev: Pointer to the drm device
2647  * Return: 0 on success. Negative error code on failure.
2648  */
2649 int vmw_kms_suspend(struct drm_device *dev)
2650 {
2651 	struct vmw_private *dev_priv = vmw_priv(dev);
2652 
2653 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2654 	if (IS_ERR(dev_priv->suspend_state)) {
2655 		int ret = PTR_ERR(dev_priv->suspend_state);
2656 
2657 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2658 		dev_priv->suspend_state = NULL;
2659 
2660 		return ret;
2661 	}
2662 
2663 	return 0;
2664 }
2665 
2666 
2667 /**
2668  * vmw_kms_resume - Re-enable modesetting and restore state
2669  *
2670  * @dev: Pointer to the drm device
2671  * Return: 0 on success. Negative error code on failure.
2672  *
2673  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2674  * to call this function without a previous vmw_kms_suspend().
2675  */
2676 int vmw_kms_resume(struct drm_device *dev)
2677 {
2678 	struct vmw_private *dev_priv = vmw_priv(dev);
2679 	int ret;
2680 
2681 	if (WARN_ON(!dev_priv->suspend_state))
2682 		return 0;
2683 
2684 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2685 	dev_priv->suspend_state = NULL;
2686 
2687 	return ret;
2688 }
2689 
2690 /**
2691  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2692  *
2693  * @dev: Pointer to the drm device
2694  */
2695 void vmw_kms_lost_device(struct drm_device *dev)
2696 {
2697 	drm_atomic_helper_shutdown(dev);
2698 }
2699 
2700 /**
2701  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2702  * @update: The closure structure.
2703  *
2704  * Call this helper after setting callbacks in &vmw_du_update_plane to do a
2705  * plane update on a display unit.
2706  *
2707  * Return: 0 on success or a negative error code on failure.
2708  */
2709 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2710 {
2711 	struct drm_plane_state *state = update->plane->state;
2712 	struct drm_plane_state *old_state = update->old_state;
2713 	struct drm_atomic_helper_damage_iter iter;
2714 	struct drm_rect clip;
2715 	struct drm_rect bb;
2716 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2717 	uint32_t reserved_size = 0;
2718 	uint32_t submit_size = 0;
2719 	uint32_t curr_size = 0;
2720 	uint32_t num_hits = 0;
2721 	void *cmd_start;
2722 	char *cmd_next;
2723 	int ret;
2724 
2725 	/*
2726 	 * Iterate in advance to check whether a plane update is really needed and to
2727 	 * find the number of clips that actually fall in the plane src, for fifo allocation.
2728 	 */
2729 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2730 	drm_atomic_for_each_plane_damage(&iter, &clip)
2731 		num_hits++;
2732 
2733 	if (num_hits == 0)
2734 		return 0;
2735 
2736 	if (update->vfb->bo) {
2737 		struct vmw_framebuffer_bo *vfbbo =
2738 			container_of(update->vfb, typeof(*vfbbo), base);
2739 
2740 		/*
2741 		 * For screen targets we want a mappable bo; for everything else we want
2742 		 * an accelerated, i.e. host backed (vram or gmr), bo. If the display unit
2743 		 * is not a screen target then mobs shouldn't be available.
2744 		 */
2745 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2746 			vmw_bo_placement_set(vfbbo->buffer,
2747 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2748 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2749 		} else {
2750 			WARN_ON(update->dev_priv->has_mob);
2751 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2752 		}
2753 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2754 	} else {
2755 		struct vmw_framebuffer_surface *vfbs =
2756 			container_of(update->vfb, typeof(*vfbs), base);
2757 
2758 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2759 						  0, VMW_RES_DIRTY_NONE, NULL,
2760 						  NULL);
2761 	}
2762 
2763 	if (ret)
2764 		return ret;
2765 
2766 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2767 	if (ret)
2768 		goto out_unref;
2769 
2770 	reserved_size = update->calc_fifo_size(update, num_hits);
2771 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2772 	if (!cmd_start) {
2773 		ret = -ENOMEM;
2774 		goto out_revert;
2775 	}
2776 
2777 	cmd_next = cmd_start;
2778 
2779 	if (update->post_prepare) {
2780 		curr_size = update->post_prepare(update, cmd_next);
2781 		cmd_next += curr_size;
2782 		submit_size += curr_size;
2783 	}
2784 
2785 	if (update->pre_clip) {
2786 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2787 		cmd_next += curr_size;
2788 		submit_size += curr_size;
2789 	}
2790 
2791 	bb.x1 = INT_MAX;
2792 	bb.y1 = INT_MAX;
2793 	bb.x2 = INT_MIN;
2794 	bb.y2 = INT_MIN;
2795 
2796 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2797 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2798 		uint32_t fb_x = clip.x1;
2799 		uint32_t fb_y = clip.y1;
2800 
2801 		vmw_du_translate_to_crtc(state, &clip);
2802 		if (update->clip) {
2803 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2804 						 fb_y);
2805 			cmd_next += curr_size;
2806 			submit_size += curr_size;
2807 		}
2808 		bb.x1 = min_t(int, bb.x1, clip.x1);
2809 		bb.y1 = min_t(int, bb.y1, clip.y1);
2810 		bb.x2 = max_t(int, bb.x2, clip.x2);
2811 		bb.y2 = max_t(int, bb.y2, clip.y2);
2812 	}
2813 
2814 	curr_size = update->post_clip(update, cmd_next, &bb);
2815 	submit_size += curr_size;
2816 
2817 	if (reserved_size < submit_size)
2818 		submit_size = 0;
2819 
2820 	vmw_cmd_commit(update->dev_priv, submit_size);
2821 
2822 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2823 					 update->out_fence, NULL);
2824 	return ret;
2825 
2826 out_revert:
2827 	vmw_validation_revert(&val_ctx);
2828 
2829 out_unref:
2830 	vmw_validation_unref_lists(&val_ctx);
2831 	return ret;
2832 }
2833 
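/*
 * Illustrative sketch only: the command buffer assembled above is laid out
 * by the update callbacks in this order (each callback returns the number
 * of bytes it wrote, which advances cmd_next):
 *
 *	post_prepare()	once, optional
 *	pre_clip()	once, optional
 *	clip()		once per damage rect, optional
 *	post_clip()	once, with the bounding box of all clips
 *
 * The bounding box (bb) is the union of all damage rects after translation
 * to crtc coordinates.
 */
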
2834 /**
2835  * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2836  *
2837  * @connector: the drm connector, part of a DU container
2838  * @mode: drm mode to check
2839  *
2840  * Returns MODE_OK on success, or a drm_mode_status error code.
2841  */
2842 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2843 					      struct drm_display_mode *mode)
2844 {
2845 	struct drm_device *dev = connector->dev;
2846 	struct vmw_private *dev_priv = vmw_priv(dev);
2847 	u32 max_width = dev_priv->texture_max_width;
2848 	u32 max_height = dev_priv->texture_max_height;
2849 	u32 assumed_cpp = 4;
2850 
2851 	if (dev_priv->assume_16bpp)
2852 		assumed_cpp = 2;
2853 
2854 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2855 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2856 		max_height = min(dev_priv->stdu_max_height, max_height);
2857 	}
2858 
2859 	if (max_width < mode->hdisplay)
2860 		return MODE_BAD_HVALUE;
2861 
2862 	if (max_height < mode->vdisplay)
2863 		return MODE_BAD_VVALUE;
2864 
2865 	if (!vmw_kms_validate_mode_vram(dev_priv,
2866 					mode->hdisplay * assumed_cpp,
2867 					mode->vdisplay))
2868 		return MODE_MEM;
2869 
2870 	return MODE_OK;
2871 }
2872 
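/*
 * Illustrative example only: with the default assumed_cpp of 4, a 2560x1440
 * mode is checked against a pitch of 2560 * 4 = 10240 bytes and a total of
 * 10240 * 1440 = 14745600 bytes (~14 MiB), which must fit per
 * vmw_kms_validate_mode_vram() on top of the width/height limits checked
 * first.
 */
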
2873 /**
2874  * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2875  *
2876  * @connector: the drm connector, part of a DU container
2877  *
2878  * Returns the number of added modes.
2879  */
2880 int vmw_connector_get_modes(struct drm_connector *connector)
2881 {
2882 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2883 	struct drm_device *dev = connector->dev;
2884 	struct vmw_private *dev_priv = vmw_priv(dev);
2885 	struct drm_display_mode *mode = NULL;
2886 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2887 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2888 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2889 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2890 	};
2891 	u32 max_width;
2892 	u32 max_height;
2893 	u32 num_modes;
2894 
2895 	/* Add preferred mode */
2896 	mode = drm_mode_duplicate(dev, &prefmode);
2897 	if (!mode)
2898 		return 0;
2899 
2900 	mode->hdisplay = du->pref_width;
2901 	mode->vdisplay = du->pref_height;
2902 	vmw_guess_mode_timing(mode);
2903 	drm_mode_set_name(mode);
2904 
2905 	drm_mode_probed_add(connector, mode);
2906 	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2907 
2908 	/* Probe connector for all modes not exceeding our geom limits */
2909 	max_width  = dev_priv->texture_max_width;
2910 	max_height = dev_priv->texture_max_height;
2911 
2912 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2913 		max_width  = min(dev_priv->stdu_max_width,  max_width);
2914 		max_height = min(dev_priv->stdu_max_height, max_height);
2915 	}
2916 
2917 	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2918 
2919 	return num_modes;
2920 }
2921