// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
{
	struct vmw_resource *res;

	WARN_ON(vbo->tbo.base.funcs &&
		kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);

	xa_destroy(&vbo->detached_resources);
	WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		WARN_ON(vbo != res->guest_memory_bo);
		WARN_ON(!res->guest_memory_bo);
		if (res->guest_memory_bo) {
			/* Reserve and switch the backing mob. */
			mutex_lock(&res->dev_priv->cmdbuf_mutex);
			(void)vmw_resource_reserve(res, false, true);
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			res->guest_memory_bo = NULL;
			res->guest_memory_offset = 0;
			vmw_resource_unreserve(res, false, false, false, NULL,
					       0);
			mutex_unlock(&res->dev_priv->cmdbuf_mutex);
		}
		vmw_surface_unreference(&vbo->dumb_surface);
	}
	drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_release(vbo);
	kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it in.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}
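
/*
 * Example usage (a minimal sketch, assuming a valid @dev_priv and @buf and a
 * sleepable context; error handling abbreviated): pin a buffer while the
 * device needs it resident, then release the pin.
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	// ... use the buffer at its fixed VRAM offset ...
 *	(void)vmw_bo_unpin(dev_priv, buf, false);
 */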

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
}

void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
{
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;

	atomic_inc(&vbo->map_count);

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
			  ret, bo->base.size, size);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
	int map_count;

	if (vbo->map.bo == NULL)
		return;

	map_count = atomic_dec_return(&vbo->map_count);

	if (!map_count) {
		ttm_bo_kunmap(&vbo->map);
		vbo->map.bo = NULL;
	}
}
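
/*
 * Example usage (a minimal sketch; @vbo must stay pinned or reserved for the
 * duration of the access, and src/len are caller-provided):
 *
 *	void *virt = vmw_bo_map_and_cache(vbo);
 *	if (!virt)
 *		return -ENOMEM;
 *	memcpy(virt, src, len);
 *	vmw_bo_unmap(vbo);
 */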


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 *
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false,
		.resv = params->resv,
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;
	xa_init(&vmw_bo->detached_resources);
	atomic_set(&vmw_bo->map_count, 0);

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx,
				   params->sg, params->resv, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}

int vmw_bo_create(struct vmw_private *vmw,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init will delete the *p_bo object if it fails
	 */
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}
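
/*
 * Example usage (a minimal sketch; error handling abbreviated): creating a
 * pinned, kernel-internal buffer backed by system memory. The fields shown
 * are the struct vmw_bo_params members consumed by vmw_bo_init() above.
 *
 *	struct vmw_bo_params params = {
 *		.domain = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type = ttm_bo_type_kernel,
 *		.size = PAGE_SIZE,
 *		.pin = true,
 *	};
 *	struct vmw_bo *vbo;
 *
 *	ret = vmw_bo_create(dev_priv, &params, &vbo);
 */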

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file the buffer
 * object handle is registered with is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_bo *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs)) {
			atomic_dec(&vmw_bo->cpu_writers);
		}
		vmw_user_bo_unref(&vmw_bo);
	}

	return ret;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	return drm_gem_handle_delete(file_priv, arg->handle);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_bo should be placed.
 * Return: Zero on success, Negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem)
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = to_vmw_bo(gobj);

	return 0;
}
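
/*
 * Example usage (a minimal sketch): a successful lookup takes a reference
 * that the caller must drop with vmw_user_bo_unref() when done.
 *
 *	struct vmw_bo *vbo;
 *
 *	ret = vmw_user_bo_lookup(filp, handle, &vbo);
 *	if (ret)
 *		return ret;
 *	// ... operate on vbo ...
 *	vmw_user_bo_unref(&vbo);
 */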

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
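
/*
 * Example usage (a minimal sketch, assuming @bo is already reserved): attach
 * the fence protecting the last submission, then unreserve separately as the
 * comment above requires.
 *
 *	vmw_bo_fence_single(bo, fence);
 *	ttm_bo_unreserve(bo);
 */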

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}

static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
{
	if (desired & fallback & domain)
		return 0;

	if (desired & domain)
		return TTM_PL_FLAG_DESIRED;

	return TTM_PL_FLAG_FALLBACK;
}

static u32
set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
	u32 domain = desired | fallback;
	u32 n = 0;

	/*
	 * The placements are ordered according to our preferences
	 */
	if (domain & VMW_BO_DOMAIN_MOB) {
		pl[n].mem_type = VMW_PL_MOB;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_GMR) {
		pl[n].mem_type = VMW_PL_GMR;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_VRAM) {
		pl[n].mem_type = TTM_PL_VRAM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
					      desired, fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_SYS) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}

	WARN_ON(!n);
	if (!n) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	return n;
}

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}

}

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
		domain = VMW_BO_DOMAIN_MOB;

	vmw_bo_placement_set(bo, domain, domain);
}
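
/*
 * Example usage (a minimal sketch, assuming @vbo is reserved): pick the
 * device's default accelerated placement and validate the buffer into it,
 * mirroring the pattern the pin helpers above use with explicit domains.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	vmw_bo_placement_set_default_accelerated(vbo);
 *	ret = ttm_bo_validate(&vbo->tbo, &vbo->placement, &ctx);
 */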

void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
}

void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
	xa_erase(&vbo->detached_resources, (unsigned long)res);
}

struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
{
	unsigned long index;
	struct vmw_resource *res = NULL;
	struct vmw_surface *surf = NULL;
	struct rb_node *rb_itr = vbo->res_tree.rb_node;

	if (vbo->is_dumb && vbo->dumb_surface) {
		res = &vbo->dumb_surface->res;
		goto out;
	}

	xa_for_each(&vbo->detached_resources, index, res) {
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

	for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
	     rb_itr = rb_next(rb_itr)) {
		res = rb_entry(rb_itr, struct vmw_resource, mob_node);
		if (res->func->res_type == vmw_res_surface)
			goto out;
	}

out:
	if (res)
		surf = vmw_res_to_srf(res);
	return surf;
}