// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
{
	struct vmw_resource *res;

	WARN_ON(vbo->tbo.base.funcs &&
		kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);

	xa_destroy(&vbo->detached_resources);
	WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		WARN_ON(vbo != res->guest_memory_bo);
		WARN_ON(!res->guest_memory_bo);
		if (res->guest_memory_bo) {
			/* Reserve and switch the backing mob. */
			mutex_lock(&res->dev_priv->cmdbuf_mutex);
			(void)vmw_resource_reserve(res, false, true);
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			res->guest_memory_bo = NULL;
			res->guest_memory_offset = 0;
			vmw_resource_unreserve(res, false, false, false, NULL,
					       0);
			mutex_unlock(&res->dev_priv->cmdbuf_mutex);
		}
		vmw_surface_unreference(&vbo->dumb_surface);
	}
	drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_release(vbo);
	kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to a given placement and pin it.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
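
/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * caller that needs a buffer resident in VRAM for the duration of an
 * operation would typically pair the pin with vmw_bo_unpin():
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	(issue work that requires the buffer in VRAM)
 *	vmw_bo_unpin(dev_priv, buf, false);
 *
 * Both helpers take and drop the buffer reservation internally, so the
 * caller is not expected to hold it across these calls.
 */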


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer; this does not move the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}
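
/*
 * Illustrative sketch (not part of the driver; the command body layout below
 * is a hypothetical example, not taken from this file): a caller that has the
 * buffer pinned or reserved can translate its current placement into a guest
 * pointer for an SVGA command:
 *
 *	SVGAGuestPtr ptr;
 *
 *	vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
 *	cmd_body->guest_ptr = ptr;	(gmrId/offset pair used by the device)
 *
 * For a VRAM placement, gmrId is SVGA_GMR_FRAMEBUFFER and offset is the byte
 * offset into VRAM; otherwise gmrId is the GMR/MOB id and offset is zero.
 */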


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
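
/*
 * Illustrative sketch (not part of the driver): unlike the vmw_bo_pin_in_*()
 * helpers above, vmw_bo_pin_reserved() expects the caller to already hold the
 * reservation, so a hypothetical user would bracket it explicitly:
 *
 *	ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	(use the buffer while it is guaranteed not to move)
 *	vmw_bo_pin_reserved(vbo, false);
 *	ttm_bo_unreserve(&vbo->tbo);
 */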

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
}

void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
{
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;

	atomic_inc(&vbo->map_count);

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
			  ret, bo->base.size, size);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
	int map_count;

	if (vbo->map.bo == NULL)
		return;

	map_count = atomic_dec_return(&vbo->map_count);

	if (!map_count) {
		ttm_bo_kunmap(&vbo->map);
		vbo->map.bo = NULL;
	}
}
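
/*
 * Illustrative sketch (not part of the driver): the cached map is reference
 * counted through vbo->map_count, so each vmw_bo_map_and_cache() is normally
 * balanced by a vmw_bo_unmap(); the kernel mapping itself is only torn down
 * once the count drops to zero. A hypothetical caller copying data into a
 * pinned buffer might look like:
 *
 *	void *virt = vmw_bo_map_and_cache(vbo);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	memcpy(virt, src, size);
 *	vmw_bo_unmap(vbo);
 *
 * Callers relying on the caching behavior may leave the map in place; it is
 * then removed on move, swapout or destruction as described above.
 */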


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 *
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false,
		.resv = params->resv,
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;
	xa_init(&vmw_bo->detached_resources);
	atomic_set(&vmw_bo->map_count, 0);

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx,
				   params->sg, params->resv, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}

int vmw_bo_create(struct vmw_private *vmw,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init will delete the *p_bo object if it fails
	 */
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}
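
/*
 * Illustrative sketch (not part of the driver): a typical kernel-internal
 * allocation fills a struct vmw_bo_params and lets vmw_bo_create() do the
 * rest. The field names below are the ones consumed by vmw_bo_init() above;
 * the concrete values are only an example:
 *
 *	struct vmw_bo_params params = {
 *		.domain = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type = ttm_bo_type_kernel,
 *		.size = PAGE_SIZE,
 *		.pin = true,
 *	};
 *	struct vmw_bo *vbo;
 *	int ret = vmw_bo_create(dev_priv, &params, &vbo);
 *
 *	if (ret)
 *		return ret;
 */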

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file handle that
 * grabbed it is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_bo *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs)) {
			atomic_dec(&vmw_bo->cpu_writers);
		}
		vmw_user_bo_unref(&vmw_bo);
	}

	return ret;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
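
/*
 * Illustrative sketch of the expected user-space flow (not part of the
 * driver; the exact ioctl wrapper used by the caller is an assumption here):
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.handle = bo_handle,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *	(issue the vmwgfx synccpu ioctl, access the buffer from the CPU, then)
 *	arg.op = drm_vmw_synccpu_release;
 *	(issue the ioctl again with the same handle and flags)
 *
 * A grab without drm_vmw_synccpu_allow_cs blocks further command-stream use
 * of the buffer (via cpu_writers) until the matching release.
 */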

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a GEM buffer object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return drm_gem_handle_delete(file_priv, arg->handle);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_bo should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem).
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = to_vmw_bo(gobj);

	return 0;
}
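
/*
 * Illustrative sketch (not part of the driver): because the returned object
 * is refcounted, every successful lookup must be balanced with
 * vmw_user_bo_unref(), as the synccpu code above does:
 *
 *	struct vmw_bo *vbo;
 *	int ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
 *
 *	if (ret)
 *		return ret;
 *	(use vbo)
 *	vmw_user_bo_unref(&vbo);
 */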

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
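
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * has emitted device commands touching a reserved buffer would typically
 * fence it before unreserving, letting the function create the fence:
 *
 *	ret = ttm_bo_reserve(bo, false, false, NULL);
 *	if (ret)
 *		return ret;
 *	(emit commands referencing the buffer)
 *	vmw_bo_fence_single(bo, NULL);
 *	ttm_bo_unreserve(bo);
 *
 * Passing a non-NULL fence attaches that existing fence instead; either way
 * the function drops the fence reference it holds before returning.
 */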

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}

static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
{
	if (desired & fallback & domain)
		return 0;

	if (desired & domain)
		return TTM_PL_FLAG_DESIRED;

	return TTM_PL_FLAG_FALLBACK;
}

static u32
set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
	u32 domain = desired | fallback;
	u32 n = 0;

	/*
	 * The placements are ordered according to our preferences
	 */
	if (domain & VMW_BO_DOMAIN_MOB) {
		pl[n].mem_type = VMW_PL_MOB;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_GMR) {
		pl[n].mem_type = VMW_PL_GMR;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_VRAM) {
		pl[n].mem_type = TTM_PL_VRAM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
					      desired, fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_SYS) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}

	WARN_ON(!n);
	if (!n) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	return n;
}

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}

}
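
/*
 * Illustrative note (not a new API): @domain is the set of desired
 * placements and @busy_domain, roughly, the set that remains acceptable
 * under memory pressure. Per placement_flags() above, a memory type listed
 * only in @domain is flagged TTM_PL_FLAG_DESIRED, one listed only in
 * @busy_domain is flagged TTM_PL_FLAG_FALLBACK, and one present in both
 * gets no flag. For example, vmw_bo_pin_in_vram_or_gmr() above uses:
 *
 *	vmw_bo_placement_set(buf,
 *			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
 *			     VMW_BO_DOMAIN_GMR);
 *	ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
 *
 * so GMR and VRAM are both tried, with GMR also acceptable as the fallback.
 */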

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
		domain = VMW_BO_DOMAIN_MOB;

	vmw_bo_placement_set(bo, domain, domain);
}

void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
}

void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	xa_erase(&vbo->detached_resources, (unsigned long)res);
}

struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
{
	unsigned long index;
	struct vmw_resource *res = NULL;
	struct vmw_surface *surf = NULL;
	struct rb_node *rb_itr = vbo->res_tree.rb_node;

	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		goto out;
	}

	xa_for_each(&vbo->detached_resources, index, res) {
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

	for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
	     rb_itr = rb_next(rb_itr)) {
		res = rb_entry(rb_itr, struct vmw_resource, mob_node);
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

out:
	if (res)
		surf = vmw_res_to_srf(res);
	return surf;
}