1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  **************************************************************************/
8 
9 #include "vmwgfx_kms.h"
10 
11 #include "vmwgfx_bo.h"
12 #include "vmwgfx_resource_priv.h"
13 #include "vmwgfx_vkms.h"
14 #include "vmw_surface_cache.h"
15 
16 #include <drm/drm_atomic.h>
17 #include <drm/drm_atomic_helper.h>
18 #include <drm/drm_damage_helper.h>
19 #include <drm/drm_fourcc.h>
20 #include <drm/drm_rect.h>
21 #include <drm/drm_sysfs.h>
22 #include <drm/drm_edid.h>
23 
24 void vmw_du_init(struct vmw_display_unit *du)
25 {
26 	vmw_vkms_crtc_init(&du->crtc);
27 }
28 
29 void vmw_du_cleanup(struct vmw_display_unit *du)
30 {
31 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
32 
33 	vmw_vkms_crtc_cleanup(&du->crtc);
34 	drm_plane_cleanup(&du->primary);
35 	if (vmw_cmd_supported(dev_priv))
36 		drm_plane_cleanup(&du->cursor.base);
37 
38 	drm_connector_unregister(&du->connector);
39 	drm_crtc_cleanup(&du->crtc);
40 	drm_encoder_cleanup(&du->encoder);
41 	drm_connector_cleanup(&du->connector);
42 }
43 
44 
45 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
46 {
47 	drm_plane_cleanup(plane);
48 
49 	/* Planes are static in our case so we don't free them */
50 }
51 
52 
53 /**
54  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
55  *
56  * @vps: plane state associated with the display surface
57  */
58 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
59 {
60 	struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
61 
62 	if (surf) {
63 		if (vps->pinned) {
64 			vmw_resource_unpin(&surf->res);
65 			vps->pinned--;
66 		}
67 	}
68 }
69 
70 
71 /**
72  * vmw_du_plane_cleanup_fb - Unpins the plane surface
73  *
74  * @plane:  display plane
75  * @old_state: Contains the FB to clean up
76  *
77  * Unpins the framebuffer surface
80  */
81 void
82 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
83 			struct drm_plane_state *old_state)
84 {
85 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
86 
87 	vmw_du_plane_unpin_surf(vps);
88 }
89 
90 
91 /**
92  * vmw_du_primary_plane_atomic_check - check if the new state is okay
93  *
94  * @plane: display plane
95  * @state: info on the new plane state, including the FB
96  *
97  * Check if the new state is settable given the current state.  Other
98  * than what the atomic helper checks, we care about the crtc fitting
99  * the FB and maintaining one active framebuffer.
100  *
101  * Returns 0 on success
102  */
103 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
104 				      struct drm_atomic_state *state)
105 {
106 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
107 									   plane);
108 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
109 									   plane);
110 	struct drm_crtc_state *crtc_state = NULL;
111 	struct drm_framebuffer *new_fb = new_state->fb;
112 	struct drm_framebuffer *old_fb = old_state->fb;
113 	int ret;
114 
115 	/*
116 	 * Ignore damage clips if the framebuffer attached to the plane's state
117 	 * has changed since the last plane update (page-flip). In this case, a
118 	 * full plane update should happen because uploads are done per-buffer.
119 	 */
120 	if (old_fb != new_fb)
121 		new_state->ignore_damage_clips = true;
122 
123 	if (new_state->crtc)
124 		crtc_state = drm_atomic_get_new_crtc_state(state,
125 							   new_state->crtc);
126 
127 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
128 						  DRM_PLANE_NO_SCALING,
129 						  DRM_PLANE_NO_SCALING,
130 						  false, true);
131 	return ret;
132 }
133 
134 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
135 			     struct drm_atomic_state *state)
136 {
137 	struct vmw_private *vmw = vmw_priv(crtc->dev);
138 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
139 									 crtc);
140 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
141 	int connector_mask = drm_connector_mask(&du->connector);
142 	bool has_primary = new_state->plane_mask &
143 			   drm_plane_mask(crtc->primary);
144 
145 	/*
146 	 * This is fine in general, but broken userspace might expect
147 	 * some actual rendering, so give a clue as to why it's blank.
148 	 */
149 	if (new_state->enable && !has_primary)
150 		drm_dbg_driver(&vmw->drm,
151 			       "CRTC without a primary plane will be blank.\n");
152 
153 
154 	if (new_state->connector_mask != connector_mask &&
155 	    new_state->connector_mask != 0) {
156 		DRM_ERROR("Invalid connectors configuration\n");
157 		return -EINVAL;
158 	}
159 
160 	/*
161 	 * Our virtual device does not have a dot clock, so use the logical
162 	 * clock value as the dot clock.
163 	 */
164 	if (new_state->mode.crtc_clock == 0)
165 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
166 
167 	return 0;
168 }
169 
170 
171 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
172 			      struct drm_atomic_state *state)
173 {
174 	vmw_vkms_crtc_atomic_begin(crtc, state);
175 }
176 
177 /**
178  * vmw_du_crtc_duplicate_state - duplicate crtc state
179  * @crtc: DRM crtc
180  *
181  * Allocates and returns a copy of the crtc state (both common and
182  * vmw-specific) for the specified crtc.
183  *
184  * Returns: The newly allocated crtc state, or NULL on failure.
185  */
186 struct drm_crtc_state *
187 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
188 {
189 	struct drm_crtc_state *state;
190 	struct vmw_crtc_state *vcs;
191 
192 	if (WARN_ON(!crtc->state))
193 		return NULL;
194 
195 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
196 
197 	if (!vcs)
198 		return NULL;
199 
200 	state = &vcs->base;
201 
202 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
203 
204 	return state;
205 }
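
/*
 * Editorial note (illustrative, not part of the original file): the
 * duplicate/reset helpers here rely on the usual DRM state-subclassing
 * pattern, where the driver state embeds the core state as its first
 * member, assumed to look roughly like:
 *
 *	struct vmw_crtc_state {
 *		struct drm_crtc_state base;
 *		// driver-private members follow
 *	};
 *
 * kmemdup(crtc->state, sizeof(*vcs), ...) then copies the core and
 * private parts in one go, and downcasting is a container_of() away,
 * which is what vmw_crtc_state_to_vcs() is assumed to wrap:
 *
 *	struct vmw_crtc_state *vcs =
 *		container_of(state, struct vmw_crtc_state, base);
 */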
206 
207 
208 /**
209  * vmw_du_crtc_reset - creates a blank vmw crtc state
210  * @crtc: DRM crtc
211  *
212  * Resets the atomic state for @crtc by freeing the state pointer (which
213  * might be NULL, e.g. at driver load time) and allocating a new empty state
214  * object.
215  */
216 void vmw_du_crtc_reset(struct drm_crtc *crtc)
217 {
218 	struct vmw_crtc_state *vcs;
219 
220 
221 	if (crtc->state) {
222 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
223 
224 		kfree(vmw_crtc_state_to_vcs(crtc->state));
225 	}
226 
227 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
228 
229 	if (!vcs) {
230 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
231 		return;
232 	}
233 
234 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
235 }
236 
237 
238 /**
239  * vmw_du_crtc_destroy_state - destroy crtc state
240  * @crtc: DRM crtc
241  * @state: state object to destroy
242  *
243  * Destroys the crtc state (both common and vmw-specific) for the
244  * specified crtc.
245  */
246 void
247 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
248 			  struct drm_crtc_state *state)
249 {
250 	drm_atomic_helper_crtc_destroy_state(crtc, state);
251 }
252 
253 
254 /**
255  * vmw_du_plane_duplicate_state - duplicate plane state
256  * @plane: drm plane
257  *
258  * Allocates and returns a copy of the plane state (both common and
259  * vmw-specific) for the specified plane.
260  *
261  * Returns: The newly allocated plane state, or NULL on failure.
262  */
263 struct drm_plane_state *
264 vmw_du_plane_duplicate_state(struct drm_plane *plane)
265 {
266 	struct drm_plane_state *state;
267 	struct vmw_plane_state *vps;
268 
269 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
270 
271 	if (!vps)
272 		return NULL;
273 
274 	vps->pinned = 0;
275 	vps->cpp = 0;
276 
277 	vps->cursor.mob = NULL;
278 
279 	/* Each ref counted resource needs to be acquired again */
280 	vmw_user_object_ref(&vps->uo);
281 	state = &vps->base;
282 
283 	__drm_atomic_helper_plane_duplicate_state(plane, state);
284 
285 	return state;
286 }
287 
288 
289 /**
290  * vmw_du_plane_reset - creates a blank vmw plane state
291  * @plane: drm plane
292  *
293  * Resets the atomic state for @plane by freeing the state pointer (which might
294  * be NULL, e.g. at driver load time) and allocating a new empty state object.
295  */
296 void vmw_du_plane_reset(struct drm_plane *plane)
297 {
298 	struct vmw_plane_state *vps;
299 
300 	if (plane->state)
301 		vmw_du_plane_destroy_state(plane, plane->state);
302 
303 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
304 
305 	if (!vps) {
306 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
307 		return;
308 	}
309 
310 	__drm_atomic_helper_plane_reset(plane, &vps->base);
311 }
312 
313 
314 /**
315  * vmw_du_plane_destroy_state - destroy plane state
316  * @plane: DRM plane
317  * @state: state object to destroy
318  *
319  * Destroys the plane state (both common and vmw-specific) for the
320  * specified plane.
321  */
322 void
323 vmw_du_plane_destroy_state(struct drm_plane *plane,
324 			   struct drm_plane_state *state)
325 {
326 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
327 
328 	/* Should have been freed by cleanup_fb */
329 	vmw_user_object_unref(&vps->uo);
330 
331 	drm_atomic_helper_plane_destroy_state(plane, state);
332 }
333 
334 
335 /**
336  * vmw_du_connector_duplicate_state - duplicate connector state
337  * @connector: DRM connector
338  *
339  * Allocates and returns a copy of the connector state (both common and
340  * vmw-specific) for the specified connector.
341  *
342  * Returns: The newly allocated connector state, or NULL on failure.
343  */
344 struct drm_connector_state *
345 vmw_du_connector_duplicate_state(struct drm_connector *connector)
346 {
347 	struct drm_connector_state *state;
348 	struct vmw_connector_state *vcs;
349 
350 	if (WARN_ON(!connector->state))
351 		return NULL;
352 
353 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
354 
355 	if (!vcs)
356 		return NULL;
357 
358 	state = &vcs->base;
359 
360 	__drm_atomic_helper_connector_duplicate_state(connector, state);
361 
362 	return state;
363 }
364 
365 
366 /**
367  * vmw_du_connector_reset - creates a blank vmw connector state
368  * @connector: DRM connector
369  *
370  * Resets the atomic state for @connector by freeing the state pointer (which
371  * might be NULL, e.g. at driver load time) and allocating a new empty state
372  * object.
373  */
374 void vmw_du_connector_reset(struct drm_connector *connector)
375 {
376 	struct vmw_connector_state *vcs;
377 
378 
379 	if (connector->state) {
380 		__drm_atomic_helper_connector_destroy_state(connector->state);
381 
382 		kfree(vmw_connector_state_to_vcs(connector->state));
383 	}
384 
385 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
386 
387 	if (!vcs) {
388 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
389 		return;
390 	}
391 
392 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
393 }
394 
395 
396 /**
397  * vmw_du_connector_destroy_state - destroy connector state
398  * @connector: DRM connector
399  * @state: state object to destroy
400  *
401  * Destroys the connector state (both common and vmw-specific) for the
402  * specified connector.
403  */
404 void
405 vmw_du_connector_destroy_state(struct drm_connector *connector,
406 			       struct drm_connector_state *state)
407 {
408 	drm_atomic_helper_connector_destroy_state(connector, state);
409 }
410 /*
411  * Generic framebuffer code
412  */
413 
414 /*
415  * Surface framebuffer code
416  */
417 
418 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
419 {
420 	struct vmw_framebuffer_surface *vfbs =
421 		vmw_framebuffer_to_vfbs(framebuffer);
422 	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
423 	struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
424 
425 	if (bo) {
426 		vmw_bo_dirty_release(bo);
427 		/*
428 		 * bo->dirty is reference counted, so it being NULL
429 		 * means that the surface wasn't coherent to begin
430 		 * with, and so we have to free the dirty tracker
431 		 * in the vmw_resource.
432 		 */
433 		if (!bo->dirty && surf && surf->res.dirty)
434 			surf->res.func->dirty_free(&surf->res);
435 	}
436 	drm_framebuffer_cleanup(framebuffer);
437 	vmw_user_object_unref(&vfbs->uo);
438 
439 	kfree(vfbs);
440 }
441 
442 /**
443  * vmw_kms_readback - Perform a readback from the screen system to
444  * a buffer-object backed framebuffer.
445  *
446  * @dev_priv: Pointer to the device private structure.
447  * @file_priv: Pointer to a struct drm_file identifying the caller.
448  * Must be set to NULL if @user_fence_rep is NULL.
449  * @vfb: Pointer to the buffer-object backed framebuffer.
450  * @user_fence_rep: User-space provided structure for fence information.
451  * Must be set to non-NULL if @file_priv is non-NULL.
452  * @vclips: Array of clip rects.
453  * @num_clips: Number of clip rects in @vclips.
454  *
455  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
456  * interrupted.
457  */
458 int vmw_kms_readback(struct vmw_private *dev_priv,
459 		     struct drm_file *file_priv,
460 		     struct vmw_framebuffer *vfb,
461 		     struct drm_vmw_fence_rep __user *user_fence_rep,
462 		     struct drm_vmw_rect *vclips,
463 		     uint32_t num_clips)
464 {
465 	switch (dev_priv->active_display_unit) {
466 	case vmw_du_screen_object:
467 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
468 					    user_fence_rep, vclips, num_clips,
469 					    NULL);
470 	case vmw_du_screen_target:
471 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
472 					     user_fence_rep, NULL, vclips, num_clips,
473 					     1, NULL);
474 	default:
475 		WARN_ONCE(true,
476 			  "Readback called with invalid display system.\n");
477 	}
478 
479 	return -ENOSYS;
480 }
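
/*
 * Illustrative usage sketch (editorial addition, values made up): an
 * in-kernel caller wanting a synchronous readback of one 64x64 region
 * could do something like:
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 64, .h = 64 };
 *	int ret = vmw_kms_readback(dev_priv, NULL, vfb, NULL, &clip, 1);
 *
 * Passing both file_priv and user_fence_rep as NULL is consistent with
 * the constraints documented above.
 */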
481 
482 static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
483 						 struct drm_file *file_priv,
484 						 unsigned int *handle)
485 {
486 	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
487 	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
488 
489 	if (WARN_ON(!bo))
490 		return -EINVAL;
491 	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
492 }
493 
494 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
495 	.create_handle = vmw_framebuffer_surface_create_handle,
496 	.destroy = vmw_framebuffer_surface_destroy,
497 	.dirty = drm_atomic_helper_dirtyfb,
498 };
499 
500 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
501 					   struct vmw_user_object *uo,
502 					   struct vmw_framebuffer **out,
503 					   const struct drm_mode_fb_cmd2 *mode_cmd)
506 {
507 	struct drm_device *dev = &dev_priv->drm;
508 	struct vmw_framebuffer_surface *vfbs;
509 	struct vmw_surface *surface;
510 	int ret;
511 
512 	/* 3D is only supported on HWv8 and newer hosts */
513 	if (dev_priv->active_display_unit == vmw_du_legacy)
514 		return -ENOSYS;
515 
516 	surface = vmw_user_object_surface(uo);
517 
518 	/*
519 	 * Sanity checks.
520 	 */
521 
522 	if (!drm_any_plane_has_format(&dev_priv->drm,
523 				      mode_cmd->pixel_format,
524 				      mode_cmd->modifier[0])) {
525 		drm_dbg(&dev_priv->drm,
526 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
527 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
528 		return -EINVAL;
529 	}
530 
531 	/* Surface must be marked as a scanout. */
532 	if (unlikely(!surface->metadata.scanout))
533 		return -EINVAL;
534 
535 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
536 		     surface->metadata.num_sizes != 1 ||
537 		     surface->metadata.base_size.width < mode_cmd->width ||
538 		     surface->metadata.base_size.height < mode_cmd->height ||
539 		     surface->metadata.base_size.depth != 1)) {
540 		DRM_ERROR("Incompatible surface dimensions for requested mode.\n");
542 		return -EINVAL;
543 	}
544 
545 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
546 	if (!vfbs) {
547 		ret = -ENOMEM;
548 		goto out_err1;
549 	}
550 
551 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
552 	memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
553 	vmw_user_object_ref(&vfbs->uo);
554 
555 	*out = &vfbs->base;
556 
557 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
558 				   &vmw_framebuffer_surface_funcs);
559 	if (ret)
560 		goto out_err2;
561 
562 	return 0;
563 
564 out_err2:
565 	vmw_user_object_unref(&vfbs->uo);
566 	kfree(vfbs);
567 out_err1:
568 	return ret;
569 }
570 
571 /*
572  * Buffer-object framebuffer code
573  */
574 
575 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
576 					    struct drm_file *file_priv,
577 					    unsigned int *handle)
578 {
579 	struct vmw_framebuffer_bo *vfbd =
580 			vmw_framebuffer_to_vfbd(fb);
581 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
582 }
583 
584 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
585 {
586 	struct vmw_framebuffer_bo *vfbd =
587 		vmw_framebuffer_to_vfbd(framebuffer);
588 
589 	vmw_bo_dirty_release(vfbd->buffer);
590 	drm_framebuffer_cleanup(framebuffer);
591 	vmw_bo_unreference(&vfbd->buffer);
592 
593 	kfree(vfbd);
594 }
595 
596 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
597 	.create_handle = vmw_framebuffer_bo_create_handle,
598 	.destroy = vmw_framebuffer_bo_destroy,
599 	.dirty = drm_atomic_helper_dirtyfb,
600 };
601 
602 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
603 				      struct vmw_bo *bo,
604 				      struct vmw_framebuffer **out,
605 				      const struct drm_mode_fb_cmd2 *mode_cmd)
608 {
609 	struct drm_device *dev = &dev_priv->drm;
610 	struct vmw_framebuffer_bo *vfbd;
611 	unsigned int requested_size;
612 	int ret;
613 
614 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
615 	if (unlikely(requested_size > bo->tbo.base.size)) {
616 		DRM_ERROR("Screen buffer object size is too small for requested mode.\n");
618 		return -EINVAL;
619 	}
620 
621 	if (!drm_any_plane_has_format(&dev_priv->drm,
622 				      mode_cmd->pixel_format,
623 				      mode_cmd->modifier[0])) {
624 		drm_dbg(&dev_priv->drm,
625 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
626 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
627 		return -EINVAL;
628 	}
629 
630 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
631 	if (!vfbd) {
632 		ret = -ENOMEM;
633 		goto out_err1;
634 	}
635 
636 	vfbd->base.base.obj[0] = &bo->tbo.base;
637 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
638 	vfbd->base.bo = true;
639 	vfbd->buffer = vmw_bo_reference(bo);
640 	*out = &vfbd->base;
641 
642 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
643 				   &vmw_framebuffer_bo_funcs);
644 	if (ret)
645 		goto out_err2;
646 
647 	return 0;
648 
649 out_err2:
650 	vmw_bo_unreference(&bo);
651 	kfree(vfbd);
652 out_err1:
653 	return ret;
654 }
655 
656 
657 /**
658  * vmw_kms_srf_ok - check if a surface can be created
659  *
660  * @dev_priv: Pointer to device private struct.
661  * @width: requested width
662  * @height: requested height
663  *
664  * Surface dimensions must not exceed the maximum texture size.
665  */
666 static bool
667 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
668 {
669 	if (width  > dev_priv->texture_max_width ||
670 	    height > dev_priv->texture_max_height)
671 		return false;
672 
673 	return true;
674 }
675 
676 /**
677  * vmw_kms_new_framebuffer - Create a new framebuffer.
678  *
679  * @dev_priv: Pointer to device private struct.
680  * @uo: Pointer to user object to wrap the kms framebuffer around.
681  * Either the buffer or surface inside the user object must be NULL.
682  * @mode_cmd: Frame-buffer metadata.
683  */
684 struct vmw_framebuffer *
685 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
686 			struct vmw_user_object *uo,
687 			const struct drm_mode_fb_cmd2 *mode_cmd)
688 {
689 	struct vmw_framebuffer *vfb = NULL;
690 	int ret;
691 
692 	/* Create the new framebuffer depending on what we have */
693 	if (vmw_user_object_surface(uo)) {
694 		ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
695 						      mode_cmd);
696 	} else if (uo->buffer) {
697 		ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
698 						 mode_cmd);
699 	} else {
700 		BUG();
701 	}
702 
703 	if (ret)
704 		return ERR_PTR(ret);
705 
706 	return vfb;
707 }
708 
709 /*
710  * Generic Kernel modesetting functions
711  */
712 
713 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
714 						 struct drm_file *file_priv,
715 						 const struct drm_mode_fb_cmd2 *mode_cmd)
716 {
717 	struct vmw_private *dev_priv = vmw_priv(dev);
718 	struct vmw_framebuffer *vfb = NULL;
719 	struct vmw_user_object uo = {0};
720 	struct vmw_bo *bo;
721 	struct vmw_surface *surface;
722 	int ret;
723 
724 	/* returns either a bo or surface */
725 	ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
726 				     &uo);
727 	if (ret) {
728 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
729 			  mode_cmd->handles[0], mode_cmd->handles[0]);
730 		goto err_out;
731 	}
732 
733 
734 	if (vmw_user_object_surface(&uo) &&
735 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
736 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
737 			dev_priv->texture_max_width,
738 			dev_priv->texture_max_height);
739 		ret = -EINVAL;
740 		goto err_out;
741 	}
742 
743 
744 	vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
745 	if (IS_ERR(vfb)) {
746 		ret = PTR_ERR(vfb);
747 		goto err_out;
748 	}
749 
750 err_out:
751 	bo = vmw_user_object_buffer(&uo);
752 	surface = vmw_user_object_surface(&uo);
753 	/* vmw_user_object_lookup takes one ref, and so does new_fb */
754 	vmw_user_object_unref(&uo);
755 
756 	if (ret) {
757 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
758 		return ERR_PTR(ret);
759 	}
760 
761 	ttm_bo_reserve(&bo->tbo, false, false, NULL);
762 	ret = vmw_bo_dirty_add(bo);
763 	if (!ret && surface && surface->res.func->dirty_alloc) {
764 		surface->res.coherent = true;
765 		ret = surface->res.func->dirty_alloc(&surface->res);
766 	}
767 	ttm_bo_unreserve(&bo->tbo);
768 
769 	return &vfb->base;
770 }
771 
772 /**
773  * vmw_kms_check_display_memory - Validates display memory required for a
774  * topology
775  * @dev: DRM device
776  * @num_rects: number of drm_rect in rects
777  * @rects: array of drm_rect representing the topology to validate indexed by
778  * crtc index.
779  *
780  * Returns:
781  * 0 on success otherwise negative error code
782  */
783 static int vmw_kms_check_display_memory(struct drm_device *dev,
784 					uint32_t num_rects,
785 					struct drm_rect *rects)
786 {
787 	struct vmw_private *dev_priv = vmw_priv(dev);
788 	struct drm_rect bounding_box = {0};
789 	u64 total_pixels = 0, pixel_mem, bb_mem;
790 	int i;
791 
792 	for (i = 0; i < num_rects; i++) {
793 		/*
794 		 * For STDU, only the individual screen (screen target) is
795 		 * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
796 		 */
797 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
798 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
799 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
800 			VMW_DEBUG_KMS("Screen size not supported.\n");
801 			return -EINVAL;
802 		}
803 
804 		/* Bounding box upper left is at (0,0). */
805 		if (rects[i].x2 > bounding_box.x2)
806 			bounding_box.x2 = rects[i].x2;
807 
808 		if (rects[i].y2 > bounding_box.y2)
809 			bounding_box.y2 = rects[i].y2;
810 
811 		total_pixels += (u64) drm_rect_width(&rects[i]) *
812 			(u64) drm_rect_height(&rects[i]);
813 	}
814 
815 	/* Virtual svga device primary limits are always in 32-bpp. */
816 	pixel_mem = total_pixels * 4;
817 
818 	/*
819 	 * For HV10 and below, max_primary_mem is the VRAM size. When
820 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
821 	 * size is the limit on the primary bounding box.
822 	 */
823 	if (pixel_mem > dev_priv->max_primary_mem) {
824 		VMW_DEBUG_KMS("Combined output size too large.\n");
825 		return -EINVAL;
826 	}
827 
828 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
829 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
830 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
831 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
832 
833 		if (bb_mem > dev_priv->max_primary_mem) {
834 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
835 			return -EINVAL;
836 		}
837 	}
838 
839 	return 0;
840 }
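
/*
 * Worked example (editorial, illustrative numbers): two 1920x1080
 * outputs side by side give total_pixels = 2 * 1920 * 1080 = 4147200,
 * so pixel_mem = 4147200 * 4 = 16588800 bytes (~15.8 MiB). The
 * bounding box is 3840x1080, so bb_mem = 3840 * 1080 * 4 = 16588800
 * bytes as well; both must fit in max_primary_mem unless the device is
 * a screen-target unit with SVGA_CAP_NO_BB_RESTRICTION set.
 */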
841 
842 /**
843  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
844  * crtc mutex
845  * @state: The atomic state pointer containing the new atomic state
846  * @crtc: The crtc
847  *
848  * This function returns the new crtc state if it's part of the state update.
849  * Otherwise returns the current crtc state. It also makes sure that the
850  * crtc mutex is locked.
851  *
852  * Returns: A valid crtc state pointer or NULL. It may also return an
853  * error pointer, in particular -EDEADLK if locking needs to be rerun.
854  */
855 static struct drm_crtc_state *
856 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
857 {
858 	struct drm_crtc_state *crtc_state;
859 
860 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
861 	if (crtc_state) {
862 		lockdep_assert_held(&crtc->mutex.mutex.base);
863 	} else {
864 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
865 
866 		if (ret != 0 && ret != -EALREADY)
867 			return ERR_PTR(ret);
868 
869 		crtc_state = crtc->state;
870 	}
871 
872 	return crtc_state;
873 }
874 
875 /**
876  * vmw_kms_check_implicit - Verify that all implicit display units scan out
877  * from the same fb after the new state is committed.
878  * @dev: The drm_device.
879  * @state: The new state to be checked.
880  *
881  * Returns:
882  *   Zero on success,
883  *   -EINVAL on invalid state,
884  *   -EDEADLK if modeset locking needs to be rerun.
885  */
886 static int vmw_kms_check_implicit(struct drm_device *dev,
887 				  struct drm_atomic_state *state)
888 {
889 	struct drm_framebuffer *implicit_fb = NULL;
890 	struct drm_crtc *crtc;
891 	struct drm_crtc_state *crtc_state;
892 	struct drm_plane_state *plane_state;
893 
894 	drm_for_each_crtc(crtc, dev) {
895 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
896 
897 		if (!du->is_implicit)
898 			continue;
899 
900 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
901 		if (IS_ERR(crtc_state))
902 			return PTR_ERR(crtc_state);
903 
904 		if (!crtc_state || !crtc_state->enable)
905 			continue;
906 
907 		/*
908 		 * Can't move primary planes across crtcs, so this is OK.
909 		 * It also means we don't need to take the plane mutex.
910 		 */
911 		plane_state = du->primary.state;
912 		if (plane_state->crtc != crtc)
913 			continue;
914 
915 		if (!implicit_fb)
916 			implicit_fb = plane_state->fb;
917 		else if (implicit_fb != plane_state->fb)
918 			return -EINVAL;
919 	}
920 
921 	return 0;
922 }
923 
924 /**
925  * vmw_kms_check_topology - Validates topology in drm_atomic_state
926  * @dev: DRM device
927  * @state: the driver state object
928  *
929  * Returns:
930  * 0 on success otherwise negative error code
931  */
932 static int vmw_kms_check_topology(struct drm_device *dev,
933 				  struct drm_atomic_state *state)
934 {
935 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
936 	struct drm_rect *rects;
937 	struct drm_crtc *crtc;
938 	uint32_t i;
939 	int ret = 0;
940 
941 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
942 			GFP_KERNEL);
943 	if (!rects)
944 		return -ENOMEM;
945 
946 	drm_for_each_crtc(crtc, dev) {
947 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
948 		struct drm_crtc_state *crtc_state;
949 
950 		i = drm_crtc_index(crtc);
951 
952 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
953 		if (IS_ERR(crtc_state)) {
954 			ret = PTR_ERR(crtc_state);
955 			goto clean;
956 		}
957 
958 		if (!crtc_state)
959 			continue;
960 
961 		if (crtc_state->enable) {
962 			rects[i].x1 = du->gui_x;
963 			rects[i].y1 = du->gui_y;
964 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
965 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
966 		} else {
967 			rects[i].x1 = 0;
968 			rects[i].y1 = 0;
969 			rects[i].x2 = 0;
970 			rects[i].y2 = 0;
971 		}
972 	}
973 
974 	/* Determine change to topology due to new atomic state */
975 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
976 				      new_crtc_state, i) {
977 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
978 		struct drm_connector *connector;
979 		struct drm_connector_state *conn_state;
980 		struct vmw_connector_state *vmw_conn_state;
981 
982 		if (!du->pref_active && new_crtc_state->enable) {
983 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
984 			ret = -EINVAL;
985 			goto clean;
986 		}
987 
988 		/*
989 		 * For vmwgfx each crtc has only one connector attached and it
990 		 * is not changed, so we don't really need to check
991 		 * crtc_state->connector_mask and iterate over it.
992 		 */
993 		connector = &du->connector;
994 		conn_state = drm_atomic_get_connector_state(state, connector);
995 		if (IS_ERR(conn_state)) {
996 			ret = PTR_ERR(conn_state);
997 			goto clean;
998 		}
999 
1000 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1001 		vmw_conn_state->gui_x = du->gui_x;
1002 		vmw_conn_state->gui_y = du->gui_y;
1003 	}
1004 
1005 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1006 					   rects);
1007 
1008 clean:
1009 	kfree(rects);
1010 	return ret;
1011 }
1012 
1013 /**
1014  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1015  *
1016  * @dev: DRM device
1017  * @state: the driver state object
1018  *
1019  * This is a simple wrapper around drm_atomic_helper_check() that lets us
1020  * assign a value to mode->crtc_clock so that
1021  * drm_calc_timestamping_constants() won't print an error message.
1022  *
1023  * Returns:
1024  * Zero for success or -errno
1025  */
1026 static int
1027 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1028 			     struct drm_atomic_state *state)
1029 {
1030 	struct drm_crtc *crtc;
1031 	struct drm_crtc_state *crtc_state;
1032 	bool need_modeset = false;
1033 	int i, ret;
1034 
1035 	ret = drm_atomic_helper_check(dev, state);
1036 	if (ret)
1037 		return ret;
1038 
1039 	ret = vmw_kms_check_implicit(dev, state);
1040 	if (ret) {
1041 		VMW_DEBUG_KMS("Invalid implicit state\n");
1042 		return ret;
1043 	}
1044 
1045 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1046 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1047 			need_modeset = true;
1048 	}
1049 
1050 	if (need_modeset)
1051 		return vmw_kms_check_topology(dev, state);
1052 
1053 	return ret;
1054 }
1055 
1056 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1057 	.fb_create = vmw_kms_fb_create,
1058 	.atomic_check = vmw_kms_atomic_check_modeset,
1059 	.atomic_commit = drm_atomic_helper_commit,
1060 };
1061 
1062 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1063 				   struct drm_file *file_priv,
1064 				   struct vmw_framebuffer *vfb,
1065 				   struct vmw_surface *surface,
1066 				   uint32_t sid,
1067 				   int32_t destX, int32_t destY,
1068 				   struct drm_vmw_rect *clips,
1069 				   uint32_t num_clips)
1070 {
1071 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1072 					    &surface->res, destX, destY,
1073 					    num_clips, 1, NULL, NULL);
1074 }
1075 
1076 
1077 int vmw_kms_present(struct vmw_private *dev_priv,
1078 		    struct drm_file *file_priv,
1079 		    struct vmw_framebuffer *vfb,
1080 		    struct vmw_surface *surface,
1081 		    uint32_t sid,
1082 		    int32_t destX, int32_t destY,
1083 		    struct drm_vmw_rect *clips,
1084 		    uint32_t num_clips)
1085 {
1086 	int ret;
1087 
1088 	switch (dev_priv->active_display_unit) {
1089 	case vmw_du_screen_target:
1090 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1091 						 &surface->res, destX, destY,
1092 						 num_clips, 1, NULL, NULL);
1093 		break;
1094 	case vmw_du_screen_object:
1095 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1096 					      sid, destX, destY, clips,
1097 					      num_clips);
1098 		break;
1099 	default:
1100 		WARN_ONCE(true,
1101 			  "Present called with invalid display system.\n");
1102 		ret = -ENOSYS;
1103 		break;
1104 	}
1105 	if (ret)
1106 		return ret;
1107 
1108 	vmw_cmd_flush(dev_priv, false);
1109 
1110 	return 0;
1111 }
1112 
1113 static void
1114 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1115 {
1116 	if (dev_priv->hotplug_mode_update_property)
1117 		return;
1118 
1119 	dev_priv->hotplug_mode_update_property =
1120 		drm_property_create_range(&dev_priv->drm,
1121 					  DRM_MODE_PROP_IMMUTABLE,
1122 					  "hotplug_mode_update", 0, 1);
1123 }
1124 
1125 static void
1126 vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
1127 {
1128 	struct vmw_private *vmw = vmw_priv(old_state->dev);
1129 	struct drm_crtc *crtc;
1130 	struct drm_crtc_state *old_crtc_state;
1131 	int i;
1132 
1133 	drm_atomic_helper_commit_tail(old_state);
1134 
1135 	if (vmw->vkms_enabled) {
1136 		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1137 			struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1138 			(void)old_crtc_state;
1139 			flush_work(&du->vkms.crc_generator_work);
1140 		}
1141 	}
1142 }
1143 
1144 static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
1145 	.atomic_commit_tail = vmw_atomic_commit_tail,
1146 };
1147 
1148 int vmw_kms_init(struct vmw_private *dev_priv)
1149 {
1150 	struct drm_device *dev = &dev_priv->drm;
1151 	int ret;
1152 	static const char *display_unit_names[] = {
1153 		"Invalid",
1154 		"Legacy",
1155 		"Screen Object",
1156 		"Screen Target",
1157 		"Invalid (max)"
1158 	};
1159 
1160 	drm_mode_config_init(dev);
1161 	dev->mode_config.funcs = &vmw_kms_funcs;
1162 	dev->mode_config.min_width = 1;
1163 	dev->mode_config.min_height = 1;
1164 	dev->mode_config.max_width = dev_priv->texture_max_width;
1165 	dev->mode_config.max_height = dev_priv->texture_max_height;
1166 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
1167 	dev->mode_config.helper_private = &vmw_mode_config_helpers;
1168 
1169 	drm_mode_create_suggested_offset_properties(dev);
1170 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1171 
1172 	ret = vmw_kms_stdu_init_display(dev_priv);
1173 	if (ret) {
1174 		ret = vmw_kms_sou_init_display(dev_priv);
1175 		if (ret) /* Fallback */
1176 			ret = vmw_kms_ldu_init_display(dev_priv);
1177 	}
1178 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
1179 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
1180 		 display_unit_names[dev_priv->active_display_unit]);
1181 
1182 	return ret;
1183 }
1184 
1185 int vmw_kms_close(struct vmw_private *dev_priv)
1186 {
1187 	int ret = 0;
1188 
1189 	/*
1190 	 * The docs say we should take the lock before calling this function,
1191 	 * but since it destroys encoders and our destructor calls
1192 	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
1193 	 */
1194 	drm_mode_config_cleanup(&dev_priv->drm);
1195 	if (dev_priv->active_display_unit == vmw_du_legacy)
1196 		ret = vmw_kms_ldu_close_display(dev_priv);
1197 
1198 	return ret;
1199 }
1200 
1201 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1202 			unsigned width, unsigned height, unsigned pitch,
1203 			unsigned bpp, unsigned depth)
1204 {
1205 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1206 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1207 	else if (vmw_fifo_have_pitchlock(vmw_priv))
1208 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
1209 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1210 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1211 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
1212 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1213 
1214 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
1215 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
1216 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
1217 		return -EINVAL;
1218 	}
1219 
1220 	return 0;
1221 }
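
/*
 * Example (editorial sketch): programming a 1024x768 32-bpp mode with
 * this helper amounts to pitch = 1024 * 4 = 4096 and, assuming the
 * host reports a depth of 24 for 32 bpp:
 *
 *	ret = vmw_kms_write_svga(dev_priv, 1024, 768, 4096, 32, 24);
 *
 * which writes the pitchlock (register or FIFO), then
 * SVGA_REG_WIDTH/HEIGHT, then SVGA_REG_BITS_PER_PIXEL if 8-bit
 * emulation is supported, and finally verifies SVGA_REG_DEPTH against
 * the requested depth.
 */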
1222 
1223 static
1224 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1225 				u64 pitch,
1226 				u64 height)
1227 {
1228 	return (pitch * height) < (u64)dev_priv->vram_size;
1229 }
1230 
1231 /**
1232  * vmw_du_update_layout - Update the display unit with topology from resolution
1233  * plugin and generate DRM uevent
1234  * @dev_priv: device private
1235  * @num_rects: number of drm_rect in rects
1236  * @rects: topology to update
1237  */
1238 static int vmw_du_update_layout(struct vmw_private *dev_priv,
1239 				unsigned int num_rects, struct drm_rect *rects)
1240 {
1241 	struct drm_device *dev = &dev_priv->drm;
1242 	struct vmw_display_unit *du;
1243 	struct drm_connector *con;
1244 	struct drm_connector_list_iter conn_iter;
1245 	struct drm_modeset_acquire_ctx ctx;
1246 	struct drm_crtc *crtc;
1247 	int ret;
1248 
1249 	/* Currently gui_x/y is protected with the crtc mutex */
1250 	mutex_lock(&dev->mode_config.mutex);
1251 	drm_modeset_acquire_init(&ctx, 0);
1252 retry:
1253 	drm_for_each_crtc(crtc, dev) {
1254 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
1255 		if (ret < 0) {
1256 			if (ret == -EDEADLK) {
1257 				drm_modeset_backoff(&ctx);
1258 				goto retry;
1259 			}
1260 			goto out_fini;
1261 		}
1262 	}
1263 
1264 	drm_connector_list_iter_begin(dev, &conn_iter);
1265 	drm_for_each_connector_iter(con, &conn_iter) {
1266 		du = vmw_connector_to_du(con);
1267 		if (num_rects > du->unit) {
1268 			du->pref_width = drm_rect_width(&rects[du->unit]);
1269 			du->pref_height = drm_rect_height(&rects[du->unit]);
1270 			du->pref_active = true;
1271 			du->gui_x = rects[du->unit].x1;
1272 			du->gui_y = rects[du->unit].y1;
1273 		} else {
1274 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
1275 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
1276 			du->pref_active = false;
1277 			du->gui_x = 0;
1278 			du->gui_y = 0;
1279 		}
1280 	}
1281 	drm_connector_list_iter_end(&conn_iter);
1282 
1283 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1284 		du = vmw_connector_to_du(con);
1285 		if (num_rects > du->unit) {
1286 			drm_object_property_set_value
1287 			  (&con->base, dev->mode_config.suggested_x_property,
1288 			   du->gui_x);
1289 			drm_object_property_set_value
1290 			  (&con->base, dev->mode_config.suggested_y_property,
1291 			   du->gui_y);
1292 		} else {
1293 			drm_object_property_set_value
1294 			  (&con->base, dev->mode_config.suggested_x_property,
1295 			   0);
1296 			drm_object_property_set_value
1297 			  (&con->base, dev->mode_config.suggested_y_property,
1298 			   0);
1299 		}
1300 		con->status = vmw_du_connector_detect(con, true);
1301 	}
1302 out_fini:
1303 	drm_modeset_drop_locks(&ctx);
1304 	drm_modeset_acquire_fini(&ctx);
1305 	mutex_unlock(&dev->mode_config.mutex);
1306 
1307 	drm_sysfs_hotplug_event(dev);
1308 
1309 	return 0;
1310 }
1311 
1312 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1313 			  u16 *r, u16 *g, u16 *b,
1314 			  uint32_t size,
1315 			  struct drm_modeset_acquire_ctx *ctx)
1316 {
1317 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1318 	int i;
1319 
1320 	for (i = 0; i < size; i++) {
1321 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
1322 			  r[i], g[i], b[i]);
1323 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
1324 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1325 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1326 	}
1327 
1328 	return 0;
1329 }
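
/*
 * Worked example (editorial): the palette holds one byte per channel
 * at consecutive offsets, so entry i occupies SVGA_PALETTE_BASE + 3*i
 * through SVGA_PALETTE_BASE + 3*i + 2. The >> 8 above truncates the
 * 16-bit gamma values, so entry 1 with r/g/b = 0x8000/0x4000/0x00ff is
 * written as 0x80, 0x40, 0x00 at offsets base+3, base+4 and base+5.
 */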
1330 
1331 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
1332 {
1333 	return 0;
1334 }
1335 
1336 enum drm_connector_status
1337 vmw_du_connector_detect(struct drm_connector *connector, bool force)
1338 {
1339 	uint32_t num_displays;
1340 	struct drm_device *dev = connector->dev;
1341 	struct vmw_private *dev_priv = vmw_priv(dev);
1342 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
1343 
1344 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1345 
1346 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
1347 		 du->pref_active) ?
1348 		connector_status_connected : connector_status_disconnected);
1349 }
1350 
1351 /**
1352  * vmw_guess_mode_timing - Provide fake timings for a
1353  * 60Hz vrefresh mode.
1354  *
1355  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
1356  * members filled in.
1357  */
1358 void vmw_guess_mode_timing(struct drm_display_mode *mode)
1359 {
1360 	mode->hsync_start = mode->hdisplay + 50;
1361 	mode->hsync_end = mode->hsync_start + 50;
1362 	mode->htotal = mode->hsync_end + 50;
1363 
1364 	mode->vsync_start = mode->vdisplay + 50;
1365 	mode->vsync_end = mode->vsync_start + 50;
1366 	mode->vtotal = mode->vsync_end + 50;
1367 
1368 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
1369 }
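
/*
 * Worked example (editorial): for a 1920x1080 mode the margins above
 * give htotal = 1920 + 150 = 2070 and vtotal = 1080 + 150 = 1230, so
 * clock = 2070 * 1230 / 100 * 6 = 152766 kHz. That corresponds to
 * 152766000 / (2070 * 1230) = 60 Hz, i.e. exactly the intended 60Hz
 * vrefresh.
 */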
1370 
1371 
1372 /**
1373  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
1374  * @dev: drm device for the ioctl
1375  * @data: data pointer for the ioctl
1376  * @file_priv: drm file for the ioctl call
1377  *
1378  * Update preferred topology of display unit as per ioctl request. The topology
1379  * is expressed as array of drm_vmw_rect.
1380  * e.g.
1381  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
1382  *
1383  * NOTE:
1384  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
1385  * Besides the device limit on topology, x + w and y + h (lower right) cannot
1386  * be greater than INT_MAX. A topology beyond these limits returns an error.
1387  *
1388  * Returns:
1389  * Zero on success, negative errno on failure.
1390  */
1391 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1392 				struct drm_file *file_priv)
1393 {
1394 	struct vmw_private *dev_priv = vmw_priv(dev);
1395 	struct drm_mode_config *mode_config = &dev->mode_config;
1396 	struct drm_vmw_update_layout_arg *arg =
1397 		(struct drm_vmw_update_layout_arg *)data;
1398 	const void __user *user_rects;
1399 	struct drm_vmw_rect *rects;
1400 	struct drm_rect *drm_rects;
1401 	unsigned rects_size;
1402 	int ret, i;
1403 
1404 	if (!arg->num_outputs) {
1405 		struct drm_rect def_rect = {0, 0,
1406 					    VMWGFX_MIN_INITIAL_WIDTH,
1407 					    VMWGFX_MIN_INITIAL_HEIGHT};
1408 		vmw_du_update_layout(dev_priv, 1, &def_rect);
1409 		return 0;
1410 	} else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
1411 		return -E2BIG;
1412 	}
1413 
1414 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1415 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1416 			GFP_KERNEL);
1417 	if (unlikely(!rects))
1418 		return -ENOMEM;
1419 
1420 	user_rects = (void __user *)(unsigned long)arg->rects;
1421 	ret = copy_from_user(rects, user_rects, rects_size);
1422 	if (unlikely(ret != 0)) {
1423 		DRM_ERROR("Failed to get rects.\n");
1424 		ret = -EFAULT;
1425 		goto out_free;
1426 	}
1427 
1428 	drm_rects = (struct drm_rect *)rects;
1429 
1430 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
1431 	for (i = 0; i < arg->num_outputs; i++) {
1432 		struct drm_vmw_rect curr_rect;
1433 
1434 		/* Verify user-space rects for overflow, as the kernel uses drm_rect */
1435 		if ((rects[i].x + rects[i].w > INT_MAX) ||
1436 		    (rects[i].y + rects[i].h > INT_MAX)) {
1437 			ret = -ERANGE;
1438 			goto out_free;
1439 		}
1440 
1441 		curr_rect = rects[i];
1442 		drm_rects[i].x1 = curr_rect.x;
1443 		drm_rects[i].y1 = curr_rect.y;
1444 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
1445 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
1446 
1447 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
1448 			      drm_rects[i].x1, drm_rects[i].y1,
1449 			      drm_rects[i].x2, drm_rects[i].y2);
1450 
1451 		/*
1452 		 * Currently this check limits the topology to within
1453 		 * mode_config->max_width/height (which is actually the max
1454 		 * texture size supported by the virtual device). This limit is
1455 		 * here to address window managers that create a single big
1456 		 * framebuffer for the whole topology.
1457 		 */
1458 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
1459 		    drm_rects[i].x2 > mode_config->max_width ||
1460 		    drm_rects[i].y2 > mode_config->max_height) {
1461 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
1462 				      drm_rects[i].x1, drm_rects[i].y1,
1463 				      drm_rects[i].x2, drm_rects[i].y2);
1464 			ret = -EINVAL;
1465 			goto out_free;
1466 		}
1467 	}
1468 
1469 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
1470 
1471 	if (ret == 0)
1472 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
1473 
1474 out_free:
1475 	kfree(rects);
1476 	return ret;
1477 }
1478 
1479 /**
1480  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
1481  * on a set of cliprects and a set of display units.
1482  *
1483  * @dev_priv: Pointer to a device private structure.
1484  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
1485  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
1486  * Cliprects are given in framebuffer coordinates.
1487  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
1488  * be NULL. Cliprects are given in source coordinates.
1489  * @dest_x: X coordinate offset for the crtc / destination clip rects.
1490  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
1491  * @num_clips: Number of cliprects in the @clips or @vclips array.
1492  * @increment: Integer with which to increment the clip counter when looping.
1493  * Used to skip a predetermined number of clip rects.
1494  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
1495  */
1496 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1497 			 struct vmw_framebuffer *framebuffer,
1498 			 const struct drm_clip_rect *clips,
1499 			 const struct drm_vmw_rect *vclips,
1500 			 s32 dest_x, s32 dest_y,
1501 			 int num_clips,
1502 			 int increment,
1503 			 struct vmw_kms_dirty *dirty)
1504 {
1505 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1506 	struct drm_crtc *crtc;
1507 	u32 num_units = 0;
1508 	u32 i, k;
1509 
1510 	dirty->dev_priv = dev_priv;
1511 
1512 	/* If crtc is passed, no need to iterate over other display units */
1513 	if (dirty->crtc) {
1514 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
1515 	} else {
1516 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
1517 				    head) {
1518 			struct drm_plane *plane = crtc->primary;
1519 
1520 			if (plane->state->fb == &framebuffer->base)
1521 				units[num_units++] = vmw_crtc_to_du(crtc);
1522 		}
1523 	}
1524 
1525 	for (k = 0; k < num_units; k++) {
1526 		struct vmw_display_unit *unit = units[k];
1527 		s32 crtc_x = unit->crtc.x;
1528 		s32 crtc_y = unit->crtc.y;
1529 		s32 crtc_width = unit->crtc.mode.hdisplay;
1530 		s32 crtc_height = unit->crtc.mode.vdisplay;
1531 		const struct drm_clip_rect *clips_ptr = clips;
1532 		const struct drm_vmw_rect *vclips_ptr = vclips;
1533 
1534 		dirty->unit = unit;
1535 		if (dirty->fifo_reserve_size > 0) {
1536 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
1537 						      dirty->fifo_reserve_size);
1538 			if (!dirty->cmd)
1539 				return -ENOMEM;
1540 
1541 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1542 		}
1543 		dirty->num_hits = 0;
1544 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
1545 		       vclips_ptr += increment) {
1546 			s32 clip_left;
1547 			s32 clip_top;
1548 
1549 			/*
1550 			 * Select clip array type. Note that integer type
1551 			 * in @clips is unsigned short, whereas in @vclips
1552 			 * it's 32-bit.
1553 			 */
1554 			if (clips) {
1555 				dirty->fb_x = (s32) clips_ptr->x1;
1556 				dirty->fb_y = (s32) clips_ptr->y1;
1557 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
1558 					crtc_x;
1559 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
1560 					crtc_y;
1561 			} else {
1562 				dirty->fb_x = vclips_ptr->x;
1563 				dirty->fb_y = vclips_ptr->y;
1564 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
1565 					dest_x - crtc_x;
1566 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
1567 					dest_y - crtc_y;
1568 			}
1569 
1570 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
1571 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
1572 
1573 			/* Skip this clip if it's outside the crtc region */
1574 			if (dirty->unit_x1 >= crtc_width ||
1575 			    dirty->unit_y1 >= crtc_height ||
1576 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
1577 				continue;
1578 
1579 			/* Clip right and bottom to crtc limits */
1580 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
1581 					       crtc_width);
1582 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
1583 					       crtc_height);
1584 
1585 			/* Clip left and top to crtc limits */
1586 			clip_left = min_t(s32, dirty->unit_x1, 0);
1587 			clip_top = min_t(s32, dirty->unit_y1, 0);
1588 			dirty->unit_x1 -= clip_left;
1589 			dirty->unit_y1 -= clip_top;
1590 			dirty->fb_x -= clip_left;
1591 			dirty->fb_y -= clip_top;
1592 
1593 			dirty->clip(dirty);
1594 		}
1595 
1596 		dirty->fifo_commit(dirty);
1597 	}
1598 
1599 	return 0;
1600 }
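
/*
 * Worked example (editorial, made-up numbers): for a unit whose crtc
 * sits at (crtc_x, crtc_y) = (1920, 0) in the topology, with
 * dest_x/dest_y = (1920, 0) and a @clips entry of
 * (x1, y1, x2, y2) = (10, 20, 110, 120), the loop above computes
 * unit_x1 = 10 + 1920 - 1920 = 10, unit_y1 = 20, unit_x2 = 110 and
 * unit_y2 = 120: framebuffer coordinates shifted by the destination
 * offset and re-expressed relative to that unit's origin, then clipped
 * to the crtc.
 */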
1601 
1602 /**
1603  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
1604  * cleanup and fencing
1605  * @dev_priv: Pointer to the device-private struct
1606  * @file_priv: Pointer identifying the client when user-space fencing is used
1607  * @ctx: Pointer to the validation context
1608  * @out_fence: If non-NULL, returned refcounted fence-pointer
1609  * @user_fence_rep: If non-NULL, pointer to user-space address area
1610  * in which to copy user-space fence info
1611  */
1612 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
1613 				      struct drm_file *file_priv,
1614 				      struct vmw_validation_context *ctx,
1615 				      struct vmw_fence_obj **out_fence,
1616 				      struct drm_vmw_fence_rep __user *
1617 				      user_fence_rep)
1618 {
1619 	struct vmw_fence_obj *fence = NULL;
1620 	uint32_t handle = 0;
1621 	int ret = 0;
1622 
1623 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
1624 	    out_fence)
1625 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
1626 						 file_priv ? &handle : NULL);
1627 	vmw_validation_done(ctx, fence);
1628 	if (file_priv)
1629 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
1630 					    ret, user_fence_rep, fence,
1631 					    handle, -1);
1632 	if (out_fence)
1633 		*out_fence = fence;
1634 	else
1635 		vmw_fence_obj_unreference(&fence);
1636 }
1637 
1638 /**
1639  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
1640  * property.
1641  *
1642  * @dev_priv: Pointer to a device private struct.
1643  *
1644  * Sets up the implicit placement property unless it's already set up.
1645  */
1646 void
1647 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
1648 {
1649 	if (dev_priv->implicit_placement_property)
1650 		return;
1651 
1652 	dev_priv->implicit_placement_property =
1653 		drm_property_create_range(&dev_priv->drm,
1654 					  DRM_MODE_PROP_IMMUTABLE,
1655 					  "implicit_placement", 0, 1);
1656 }
1657 
1658 /**
1659  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
1660  *
1661  * @dev: Pointer to the drm device
1662  * Return: 0 on success. Negative error code on failure.
1663  */
1664 int vmw_kms_suspend(struct drm_device *dev)
1665 {
1666 	struct vmw_private *dev_priv = vmw_priv(dev);
1667 
1668 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
1669 	if (IS_ERR(dev_priv->suspend_state)) {
1670 		int ret = PTR_ERR(dev_priv->suspend_state);
1671 
1672 		DRM_ERROR("Failed kms suspend: %d\n", ret);
1673 		dev_priv->suspend_state = NULL;
1674 
1675 		return ret;
1676 	}
1677 
1678 	return 0;
1679 }
1680 
1681 
1682 /**
1683  * vmw_kms_resume - Re-enable modesetting and restore state
1684  *
1685  * @dev: Pointer to the drm device
1686  * Return: 0 on success. Negative error code on failure.
1687  *
1688  * State is resumed from a previous vmw_kms_suspend(). It's illegal
1689  * to call this function without a previous vmw_kms_suspend().
1690  */
1691 int vmw_kms_resume(struct drm_device *dev)
1692 {
1693 	struct vmw_private *dev_priv = vmw_priv(dev);
1694 	int ret;
1695 
1696 	if (WARN_ON(!dev_priv->suspend_state))
1697 		return 0;
1698 
1699 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
1700 	dev_priv->suspend_state = NULL;
1701 
1702 	return ret;
1703 }
1704 
1705 /**
1706  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
1707  *
1708  * @dev: Pointer to the drm device
1709  */
1710 void vmw_kms_lost_device(struct drm_device *dev)
1711 {
1712 	drm_atomic_helper_shutdown(dev);
1713 }
1714 
1715 /**
1716  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
1717  * @update: The closure structure.
1718  *
1719  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
1720  * update on display unit.
1721  *
1722  * Return: 0 on success or a negative error code on failure.
1723  */
1724 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
1725 {
1726 	struct drm_plane_state *state = update->plane->state;
1727 	struct drm_plane_state *old_state = update->old_state;
1728 	struct drm_atomic_helper_damage_iter iter;
1729 	struct drm_rect clip;
1730 	struct drm_rect bb;
1731 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
1732 	uint32_t reserved_size = 0;
1733 	uint32_t submit_size = 0;
1734 	uint32_t curr_size = 0;
1735 	uint32_t num_hits = 0;
1736 	void *cmd_start;
1737 	char *cmd_next;
1738 	int ret;
1739 
1740 	/*
1741 	 * Iterate in advance to check if we really need a plane update, and to
1742 	 * find the number of clips actually in the plane src, for fifo allocation.
1743 	 */
1744 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
1745 	drm_atomic_for_each_plane_damage(&iter, &clip)
1746 		num_hits++;
1747 
1748 	if (num_hits == 0)
1749 		return 0;
1750 
1751 	if (update->vfb->bo) {
1752 		struct vmw_framebuffer_bo *vfbbo =
1753 			container_of(update->vfb, typeof(*vfbbo), base);
1754 
1755 		/*
1756 		 * For screen targets we want a mappable bo; for everything else we
1757 		 * want an accelerated, i.e. host-backed (vram or gmr), bo. If the
1758 		 * display unit is not a screen target then mobs shouldn't be available.
1759 		 */
1760 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
1761 			vmw_bo_placement_set(vfbbo->buffer,
1762 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
1763 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
1764 		} else {
1765 			WARN_ON(update->dev_priv->has_mob);
1766 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
1767 		}
1768 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
1769 	} else {
1770 		struct vmw_framebuffer_surface *vfbs =
1771 			container_of(update->vfb, typeof(*vfbs), base);
1772 		struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
1773 
1774 		ret = vmw_validation_add_resource(&val_ctx, &surf->res,
1775 						  0, VMW_RES_DIRTY_NONE, NULL,
1776 						  NULL);
1777 	}
1778 
1779 	if (ret)
1780 		return ret;
1781 
1782 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
1783 	if (ret)
1784 		goto out_unref;
1785 
1786 	reserved_size = update->calc_fifo_size(update, num_hits);
1787 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
1788 	if (!cmd_start) {
1789 		ret = -ENOMEM;
1790 		goto out_revert;
1791 	}
1792 
1793 	cmd_next = cmd_start;
1794 
1795 	if (update->post_prepare) {
1796 		curr_size = update->post_prepare(update, cmd_next);
1797 		cmd_next += curr_size;
1798 		submit_size += curr_size;
1799 	}
1800 
1801 	if (update->pre_clip) {
1802 		curr_size = update->pre_clip(update, cmd_next, num_hits);
1803 		cmd_next += curr_size;
1804 		submit_size += curr_size;
1805 	}
1806 
1807 	bb.x1 = INT_MAX;
1808 	bb.y1 = INT_MAX;
1809 	bb.x2 = INT_MIN;
1810 	bb.y2 = INT_MIN;
1811 
1812 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
1813 	drm_atomic_for_each_plane_damage(&iter, &clip) {
1814 		uint32_t fb_x = clip.x1;
1815 		uint32_t fb_y = clip.y1;
1816 
1817 		vmw_du_translate_to_crtc(state, &clip);
1818 		if (update->clip) {
1819 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
1820 						 fb_y);
1821 			cmd_next += curr_size;
1822 			submit_size += curr_size;
1823 		}
1824 		bb.x1 = min_t(int, bb.x1, clip.x1);
1825 		bb.y1 = min_t(int, bb.y1, clip.y1);
1826 		bb.x2 = max_t(int, bb.x2, clip.x2);
1827 		bb.y2 = max_t(int, bb.y2, clip.y2);
1828 	}
1829 
1830 	curr_size = update->post_clip(update, cmd_next, &bb);
1831 	submit_size += curr_size;
1832 
1833 	if (reserved_size < submit_size)
1834 		submit_size = 0;
1835 
1836 	vmw_cmd_commit(update->dev_priv, submit_size);
1837 
1838 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
1839 					 update->out_fence, NULL);
1840 	return ret;
1841 
1842 out_revert:
1843 	vmw_validation_revert(&val_ctx);
1844 
1845 out_unref:
1846 	vmw_validation_unref_lists(&val_ctx);
1847 	return ret;
1848 }
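
/*
 * Usage sketch (editorial): a display unit drives this helper by
 * filling in the closure before the call. Only fields actually
 * consumed above are shown; the my_* callback names are hypothetical:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vfb,
 *		.calc_fifo_size = my_calc_fifo_size,
 *		.pre_clip = my_pre_clip,
 *		.clip = my_clip,
 *		.post_clip = my_post_clip,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 *
 * post_prepare, mutex, intr and out_fence are the remaining optional
 * knobs read by the code above.
 */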
1849 
1850 /**
1851  * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
1852  *
1853  * @connector: the drm connector, part of a DU container
1854  * @mode: drm mode to check
1855  *
1856  * Returns MODE_OK on success, or a drm_mode_status error code.
1857  */
1858 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
1859 					      const struct drm_display_mode *mode)
1860 {
1861 	enum drm_mode_status ret;
1862 	struct drm_device *dev = connector->dev;
1863 	struct vmw_private *dev_priv = vmw_priv(dev);
1864 	u32 assumed_cpp = 4;
1865 
1866 	if (dev_priv->assume_16bpp)
1867 		assumed_cpp = 2;
1868 
1869 	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
1870 				     dev_priv->texture_max_height);
1871 	if (ret != MODE_OK)
1872 		return ret;
1873 
1874 	if (!vmw_kms_validate_mode_vram(dev_priv,
1875 					mode->hdisplay * assumed_cpp,
1876 					mode->vdisplay))
1877 		return MODE_MEM;
1878 
1879 	return MODE_OK;
1880 }
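
/*
 * Worked example (editorial): with assume_16bpp set, a 1920x1080 mode
 * needs pitch * height = (1920 * 2) * 1080 = 4147200 bytes of VRAM;
 * the mode is rejected with MODE_MEM unless that stays below
 * vram_size.
 */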
1881 
1882 /**
1883  * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
1884  *
1885  * @connector: the drm connector, part of a DU container
1886  *
1887  * Returns the number of added modes.
1888  */
1889 int vmw_connector_get_modes(struct drm_connector *connector)
1890 {
1891 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
1892 	struct drm_device *dev = connector->dev;
1893 	struct vmw_private *dev_priv = vmw_priv(dev);
1894 	struct drm_display_mode *mode = NULL;
1895 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
1896 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
1897 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1898 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1899 	};
1900 	u32 max_width;
1901 	u32 max_height;
1902 	u32 num_modes;
1903 
1904 	/* Add preferred mode */
1905 	mode = drm_mode_duplicate(dev, &prefmode);
1906 	if (!mode)
1907 		return 0;
1908 
1909 	mode->hdisplay = du->pref_width;
1910 	mode->vdisplay = du->pref_height;
1911 	vmw_guess_mode_timing(mode);
1912 	drm_mode_set_name(mode);
1913 
1914 	drm_mode_probed_add(connector, mode);
1915 	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
1916 
1917 	/* Probe connector for all modes not exceeding our geom limits */
1918 	max_width  = dev_priv->texture_max_width;
1919 	max_height = dev_priv->texture_max_height;
1920 
1921 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
1922 		max_width  = min(dev_priv->stdu_max_width,  max_width);
1923 		max_height = min(dev_priv->stdu_max_height, max_height);
1924 	}
1925 
1926 	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
1927 
1928 	return num_modes;
1929 }
1930 
1931 struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
1932 {
1933 	if (uo->buffer)
1934 		vmw_user_bo_ref(uo->buffer);
1935 	else if (uo->surface)
1936 		vmw_surface_reference(uo->surface);
1937 	return uo;
1938 }
1939 
1940 void vmw_user_object_unref(struct vmw_user_object *uo)
1941 {
1942 	if (uo->buffer)
1943 		vmw_user_bo_unref(&uo->buffer);
1944 	else if (uo->surface)
1945 		vmw_surface_unreference(&uo->surface);
1946 }
1947 
1948 struct vmw_bo *
1949 vmw_user_object_buffer(struct vmw_user_object *uo)
1950 {
1951 	if (uo->buffer)
1952 		return uo->buffer;
1953 	else if (uo->surface)
1954 		return uo->surface->res.guest_memory_bo;
1955 	return NULL;
1956 }
1957 
1958 struct vmw_surface *
1959 vmw_user_object_surface(struct vmw_user_object *uo)
1960 {
1961 	if (uo->buffer)
1962 		return uo->buffer->dumb_surface;
1963 	return uo->surface;
1964 }
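
/*
 * Editorial note (not part of the original file): the accessors above
 * make struct vmw_user_object behave like a tagged union. For a dumb
 * buffer, uo->buffer is set and vmw_user_object_surface() returns the
 * buffer's implicit dumb_surface; for a surface object, uo->surface is
 * set and vmw_user_object_buffer() reaches through to the surface's
 * guest_memory_bo, which may be NULL if no backing store exists yet.
 */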
1965 
1966 void *vmw_user_object_map(struct vmw_user_object *uo)
1967 {
1968 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
1969 
1970 	WARN_ON(!bo);
1971 	return vmw_bo_map_and_cache(bo);
1972 }
1973 
1974 void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
1975 {
1976 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
1977 
1978 	WARN_ON(!bo);
1979 	return vmw_bo_map_and_cache_size(bo, size);
1980 }
1981 
1982 void vmw_user_object_unmap(struct vmw_user_object *uo)
1983 {
1984 	struct vmw_bo *bo = vmw_user_object_buffer(uo);
1985 	int ret;
1986 
1987 	WARN_ON(!bo);
1988 
1989 	/* Fence the mob creation so we are guaranteed to have the mob */
1990 	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
1991 	if (ret != 0)
1992 		return;
1993 
1994 	vmw_bo_unmap(bo);
1995 	vmw_bo_pin_reserved(bo, false);
1996 
1997 	ttm_bo_unreserve(&bo->tbo);
1998 }
1999 
2000 bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
2001 {
2002 	struct vmw_bo *bo;
2003 
2004 	if (!uo || vmw_user_object_is_null(uo))
2005 		return false;
2006 
2007 	bo = vmw_user_object_buffer(uo);
2008 
2009 	if (WARN_ON(!bo))
2010 		return false;
2011 
2012 	WARN_ON(bo->map.bo && !bo->map.virtual);
2013 	return bo->map.virtual;
2014 }
2015 
2016 bool vmw_user_object_is_null(struct vmw_user_object *uo)
2017 {
2018 	return !uo->buffer && !uo->surface;
2019 }
2020