// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#include <drm/ttm/ttm_placement.h>

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_resource *res;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);

	xa_destroy(&vbo->detached_resources);
	WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		WARN_ON(vbo != res->guest_memory_bo);
		WARN_ON(!res->guest_memory_bo);
		if (res->guest_memory_bo) {
			/* Reserve and switch the backing mob. */
			mutex_lock(&res->dev_priv->cmdbuf_mutex);
			(void)vmw_resource_reserve(res, false, true);
			vmw_resource_mob_detach(res);
			if (res->dirty)
				res->func->dirty_free(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			res->guest_memory_bo = NULL;
			res->guest_memory_offset = 0;
			vmw_resource_unreserve(res, true, false, false, NULL,
					       0);
			mutex_unlock(&res->dev_priv->cmdbuf_mutex);
		}
		vmw_surface_unreference(&vbo->dumb_surface);
	}
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	drm_gem_object_release(&vbo->tbo.base);
	WARN_ON(vbo->dirty);
	kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate and pin a buffer in the given placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it in.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move and pin a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
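
/*
 * Illustrative pin/unpin usage (editor's sketch, not taken from the driver):
 * a buffer is typically pinned before its guest address is handed to the
 * device and unpinned once the device no longer references it.
 *
 *	ret = vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	... emit commands that reference buf ...
 *	(void)vmw_bo_unpin(dev_priv, buf, false);
 */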


/**
 * vmw_bo_pin_in_vram - Move and pin a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move and pin a buffer to the start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on any of:
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
}

void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
{
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;

	atomic_inc(&vbo->map_count);

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
			  ret, bo->base.size, size);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
	int map_count;

	if (vbo->map.bo == NULL)
		return;

	map_count = atomic_dec_return(&vbo->map_count);

	if (!map_count) {
		ttm_bo_kunmap(&vbo->map);
		vbo->map.bo = NULL;
	}
}
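
/*
 * Illustrative usage of the cached map (editor's sketch, not taken from the
 * driver): the caller must hold a reservation or a pin while the mapping is
 * used. A caller may drop its map reference with vmw_bo_unmap(), or leave
 * the map cached and rely on the move, swapout and destruction paths to tear
 * it down (see vmw_bo_swap_notify() and vmw_bo_move_notify() below). src and
 * len are caller-provided placeholders.
 *
 *	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
 *	if (!ret) {
 *		void *ptr = vmw_bo_map_and_cache(vbo);
 *
 *		if (ptr)
 *			memcpy(ptr, src, len);
 *		vmw_bo_unmap(vbo);
 *		ttm_bo_unreserve(&vbo->tbo);
 *	}
 */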


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 *
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false,
		.resv = params->resv,
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;
	xa_init(&vmw_bo->detached_resources);
	atomic_set(&vmw_bo->map_count, 0);

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx,
				   params->sg, params->resv, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	if (!params->keep_resv)
		ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}

int vmw_bo_create(struct vmw_private *vmw,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init will delete the *p_bo object if it fails
	 */
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	(*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file handle it
 * was obtained through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_bo *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs))
			atomic_dec(&vmw_bo->cpu_writers);
		vmw_user_bo_unref(&vmw_bo);
	}

	return ret;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return drm_gem_handle_delete(file_priv, arg->handle);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_bo should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem).
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = to_vmw_bo(gobj);

	return 0;
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
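
/*
 * Illustrative fencing sequence (editor's sketch, not taken from the driver):
 * the buffer is reserved, the GPU command referencing it is emitted, the
 * resulting fence is attached, and only then is the reservation dropped.
 * Passing a NULL fence makes vmw_bo_fence_single() insert one for us.
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (!ret) {
 *		... submit a command that reads from or writes to bo ...
 *		vmw_bo_fence_single(bo, NULL);
 *		ttm_bo_unreserve(bo);
 *	}
 */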

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}

static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
{
	if (desired & fallback & domain)
		return 0;

	if (desired & domain)
		return TTM_PL_FLAG_DESIRED;

	return TTM_PL_FLAG_FALLBACK;
}
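
/*
 * Worked example (editor's note, not taken from the driver): for the request
 * made by vmw_bo_pin_in_vram_or_gmr() above, desired is
 * VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM and fallback is VMW_BO_DOMAIN_GMR.
 * The GMR placement appears in both masks and therefore gets no flag, while
 * VRAM appears only in the desired mask and is marked TTM_PL_FLAG_DESIRED.
 * A domain present only in the fallback mask would get TTM_PL_FLAG_FALLBACK.
 */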

static u32
set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
	u32 domain = desired | fallback;
	u32 n = 0;

	/*
	 * The placements are ordered according to our preferences
	 */
	if (domain & VMW_BO_DOMAIN_MOB) {
		pl[n].mem_type = VMW_PL_MOB;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_GMR) {
		pl[n].mem_type = VMW_PL_GMR;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_VRAM) {
		pl[n].mem_type = TTM_PL_VRAM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
					      desired, fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_SYS) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}

	WARN_ON(!n);
	if (!n) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	return n;
}

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}
}

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
		domain = VMW_BO_DOMAIN_MOB;

	vmw_bo_placement_set(bo, domain, domain);
}

int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}

void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	xa_erase(&vbo->detached_resources, (unsigned long)res);
}

struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
{
	unsigned long index;
	struct vmw_resource *res = NULL;
	struct vmw_surface *surf = NULL;
	struct rb_node *rb_itr = vbo->res_tree.rb_node;

	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		goto out;
	}

	xa_for_each(&vbo->detached_resources, index, res) {
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

	for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
	     rb_itr = rb_next(rb_itr)) {
		res = rb_entry(rb_itr, struct vmw_resource, mob_node);
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

out:
	if (res)
		surf = vmw_res_to_srf(res);
	return surf;
}

s32 vmw_bo_mobid(struct vmw_bo *vbo)
{
	WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
	return (s32)vbo->tbo.resource->start;
}