Lines Matching +full:wait +full:- +full:pin
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39 WARN_ON(vbo->tbo.base.funcs && in vmw_bo_release()
40 kref_read(&vbo->tbo.base.refcount) != 0); in vmw_bo_release()
43 xa_destroy(&vbo->detached_resources); in vmw_bo_release()
44 WARN_ON(vbo->is_dumb && !vbo->dumb_surface); in vmw_bo_release()
45 if (vbo->is_dumb && vbo->dumb_surface) { in vmw_bo_release()
46 res = &vbo->dumb_surface->res; in vmw_bo_release()
47 WARN_ON(vbo != res->guest_memory_bo); in vmw_bo_release()
48 WARN_ON(!res->guest_memory_bo); in vmw_bo_release()
49 if (res->guest_memory_bo) { in vmw_bo_release()
51 mutex_lock(&res->dev_priv->cmdbuf_mutex); in vmw_bo_release()
54 if (res->coherent) in vmw_bo_release()
55 vmw_bo_dirty_release(res->guest_memory_bo); in vmw_bo_release()
56 res->guest_memory_bo = NULL; in vmw_bo_release()
57 res->guest_memory_offset = 0; in vmw_bo_release()
60 mutex_unlock(&res->dev_priv->cmdbuf_mutex); in vmw_bo_release()
62 vmw_surface_unreference(&vbo->dumb_surface); in vmw_bo_release()
64 drm_gem_object_release(&vbo->tbo.base); in vmw_bo_release()
68 * vmw_bo_free - vmw_bo destructor
74 struct vmw_bo *vbo = to_vmw_bo(&bo->base); in vmw_bo_free()
76 WARN_ON(vbo->dirty); in vmw_bo_free()
77 WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); in vmw_bo_free()
 83  * vmw_bo_pin_in_placement - Validate a buffer into a given placement and pin it.
87 * @placement: The placement to pin it.
88 * @interruptible: Use interruptible wait.
90 * -ERESTARTSYS if interrupted by a signal
98 struct ttm_buffer_object *bo = &buf->tbo; in vmw_bo_pin_in_placement()
118 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
125 * @interruptible: Use interruptible wait.
127 * -ERESTARTSYS if interrupted by a signal
134 struct ttm_buffer_object *bo = &buf->tbo; in vmw_bo_pin_in_vram_or_gmr()
146 ret = ttm_bo_validate(bo, &buf->placement, &ctx); in vmw_bo_pin_in_vram_or_gmr()
147 if (likely(ret == 0) || ret == -ERESTARTSYS) in vmw_bo_pin_in_vram_or_gmr()
153 ret = ttm_bo_validate(bo, &buf->placement, &ctx); in vmw_bo_pin_in_vram_or_gmr()
166 * vmw_bo_pin_in_vram - Move a buffer to vram.
173 * @interruptible: Use interruptible wait.
175 * -ERESTARTSYS if interrupted by a signal
187 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
193 * @buf: DMA buffer to pin.
194 * @interruptible: Use interruptible wait.
196 * -ERESTARTSYS if interrupted by a signal
203 struct ttm_buffer_object *bo = &buf->tbo; in vmw_bo_pin_in_start_of_vram()
216 if (bo->resource->mem_type == TTM_PL_VRAM && in vmw_bo_pin_in_start_of_vram()
217 bo->resource->start < PFN_UP(bo->resource->size) && in vmw_bo_pin_in_start_of_vram()
218 bo->resource->start > 0 && in vmw_bo_pin_in_start_of_vram()
219 buf->tbo.pin_count == 0) { in vmw_bo_pin_in_start_of_vram()
224 (void)ttm_bo_validate(bo, &buf->placement, &ctx); in vmw_bo_pin_in_start_of_vram()
230 buf->places[0].lpfn = PFN_UP(bo->resource->size); in vmw_bo_pin_in_start_of_vram()
231 buf->busy_places[0].lpfn = PFN_UP(bo->resource->size); in vmw_bo_pin_in_start_of_vram()
232 ret = ttm_bo_validate(bo, &buf->placement, &ctx); in vmw_bo_pin_in_start_of_vram()
235 WARN_ON(ret == 0 && bo->resource->start != 0); in vmw_bo_pin_in_start_of_vram()
 247  * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
253 * @interruptible: Use interruptible wait.
255 * -ERESTARTSYS if interrupted by a signal
261 struct ttm_buffer_object *bo = &buf->tbo; in vmw_bo_unpin()
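Taken together, the pin helpers above share one pattern: reserve the buffer, validate it into the requested placement, ttm_bo_pin(), then unreserve. Below is a minimal, hedged caller-side sketch bracketing device access with vmw_bo_pin_in_vram() and vmw_bo_unpin(); the argument order (dev_priv, buf, interruptible) and the wrapper function are assumptions based on the doc fragments above, not verbatim from this listing.

/*
 * Hedged sketch: pin a buffer into VRAM for the duration of some device
 * access, then unpin it.  Assumes the driver's internal headers
 * (vmwgfx_drv.h / vmwgfx_bo.h).
 */
static int example_readback(struct vmw_private *dev_priv, struct vmw_bo *buf)
{
	int ret;

	ret = vmw_bo_pin_in_vram(dev_priv, buf, true /* interruptible */);
	if (ret)	/* -ERESTARTSYS if interrupted by a signal */
		return ret;

	/* ... issue device commands that need the buffer resident in VRAM ... */

	return vmw_bo_unpin(dev_priv, buf, false /* not interruptible */);
}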
277 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
286 if (bo->resource->mem_type == TTM_PL_VRAM) { in vmw_bo_get_guest_ptr()
287 ptr->gmrId = SVGA_GMR_FRAMEBUFFER; in vmw_bo_get_guest_ptr()
288 ptr->offset = bo->resource->start << PAGE_SHIFT; in vmw_bo_get_guest_ptr()
290 ptr->gmrId = bo->resource->start; in vmw_bo_get_guest_ptr()
291 ptr->offset = 0; in vmw_bo_get_guest_ptr()
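The two branches above encode the device addressing convention for buffer placements. A small hedged sketch of what a caller gets back; only gmrId, offset and SVGA_GMR_FRAMEBUFFER are taken from the fragment, the wrapper function is illustrative.

/*
 * Hedged sketch: the bo should already be validated/pinned so its
 * placement cannot change while the guest pointer is in use.
 */
static void example_fill_guest_ptr(struct vmw_bo *buf, SVGAGuestPtr *ptr)
{
	vmw_bo_get_guest_ptr(&buf->tbo, ptr);
	/*
	 * VRAM: ptr->gmrId == SVGA_GMR_FRAMEBUFFER, ptr->offset is the byte
	 *       offset into VRAM (resource->start << PAGE_SHIFT).
	 * GMR:  ptr->gmrId is the GMR id, ptr->offset is 0.
	 */
}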
297 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
300 * @pin: Whether to pin or unpin.
303 void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin) in vmw_bo_pin_reserved() argument
308 struct ttm_buffer_object *bo = &vbo->tbo; in vmw_bo_pin_reserved()
309 uint32_t old_mem_type = bo->resource->mem_type; in vmw_bo_pin_reserved()
312 dma_resv_assert_held(bo->base.resv); in vmw_bo_pin_reserved()
314 if (pin == !!bo->pin_count) in vmw_bo_pin_reserved()
319 pl.mem_type = bo->resource->mem_type; in vmw_bo_pin_reserved()
320 pl.flags = bo->resource->placement; in vmw_bo_pin_reserved()
328 BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type); in vmw_bo_pin_reserved()
330 if (pin) in vmw_bo_pin_reserved()
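Unlike the pin helpers earlier in the file, vmw_bo_pin_reserved() asserts that the reservation lock is already held and never moves the buffer (the BUG_ON above checks that the memory type is unchanged). A hedged usage sketch follows; the reservation flags and the work in the middle are illustrative.

/* Hedged sketch: pin without moving, under an explicit reservation. */
static int example_pin_while_reserved(struct vmw_bo *vbo)
{
	int ret;

	ret = ttm_bo_reserve(&vbo->tbo, true /* interruptible */,
			     false /* no_wait */, NULL);
	if (ret)
		return ret;

	vmw_bo_pin_reserved(vbo, true);		/* pin in place */
	/* ... work that needs the buffer to stay resident ... */
	vmw_bo_pin_reserved(vbo, false);	/* balance the pin */

	ttm_bo_unreserve(&vbo->tbo);
	return 0;	/* a real caller often unpins later, under a fresh reservation */
}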
337 * vmw_bo_map_and_cache - Map a buffer object and cache the map
353 return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size); in vmw_bo_map_and_cache()
358 struct ttm_buffer_object *bo = &vbo->tbo; in vmw_bo_map_and_cache_size()
363 atomic_inc(&vbo->map_count); in vmw_bo_map_and_cache_size()
 365  virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used); in vmw_bo_map_and_cache_size()
369 ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map); in vmw_bo_map_and_cache_size()
372 ret, bo->base.size, size); in vmw_bo_map_and_cache_size()
 374  return ttm_kmap_obj_virtual(&vbo->map, &not_used); in vmw_bo_map_and_cache_size()
379 * vmw_bo_unmap - Tear down a cached buffer object map.
390 if (vbo->map.bo == NULL) in vmw_bo_unmap()
393 map_count = atomic_dec_return(&vbo->map_count); in vmw_bo_unmap()
396 ttm_bo_kunmap(&vbo->map); in vmw_bo_unmap()
397 vbo->map.bo = NULL; in vmw_bo_unmap()
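The kernel map is cached on the vmw_bo and reference-counted through map_count, so repeated vmw_bo_map_and_cache() calls return the same virtual address cheaply; vmw_bo_unmap() only tears the kmap down once the count reaches zero (it is also called from the swapout notify path further below). A hedged sketch of CPU access through the cached map; the write and the balancing unmap are illustrative.

/* Hedged sketch; the map helper is assumed to return NULL if the kmap failed. */
static void example_cpu_write(struct vmw_bo *vbo, u32 value)
{
	u32 *virt = vmw_bo_map_and_cache(vbo);

	if (!virt)
		return;
	virt[0] = value;	/* CPU access through the cached kmap */
	vmw_bo_unmap(vbo);	/* drops one map_count reference */
}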
403 * vmw_bo_init - Initialize a vmw buffer object
418 .interruptible = params->bo_type != ttm_bo_type_kernel, in vmw_bo_init()
420 .resv = params->resv, in vmw_bo_init()
422 struct ttm_device *bdev = &dev_priv->bdev; in vmw_bo_init()
423 struct drm_device *vdev = &dev_priv->drm; in vmw_bo_init()
429 vmw_bo->tbo.priority = 3; in vmw_bo_init()
430 vmw_bo->res_tree = RB_ROOT; in vmw_bo_init()
431 xa_init(&vmw_bo->detached_resources); in vmw_bo_init()
432 atomic_set(&vmw_bo->map_count, 0); in vmw_bo_init()
434 params->size = ALIGN(params->size, PAGE_SIZE); in vmw_bo_init()
435 drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size); in vmw_bo_init()
437 vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain); in vmw_bo_init()
438 ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type, in vmw_bo_init()
439 &vmw_bo->placement, 0, &ctx, in vmw_bo_init()
440 params->sg, params->resv, destroy); in vmw_bo_init()
444 if (params->pin) in vmw_bo_init()
445 ttm_bo_pin(&vmw_bo->tbo); in vmw_bo_init()
446 ttm_bo_unreserve(&vmw_bo->tbo); in vmw_bo_init()
460 return -ENOMEM; in vmw_bo_create()
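vmw_bo_init() above fills in the defaults (TTM priority, empty res_tree, the detached_resources xarray, map_count), page-aligns the requested size, applies the placement and optionally pins the buffer before unreserving; vmw_bo_create() allocates the vmw_bo itself and returns -ENOMEM on failure. A hedged sketch of a kernel-internal allocation follows; the vmw_bo_params fields used (size, bo_type, domain, busy_domain, pin) all appear above, while VMW_BO_DOMAIN_SYS is an assumed name for the driver's system placement domain.

/* Hedged sketch: create a small, pinned, kernel-internal buffer. */
static int example_create_pinned_bo(struct vmw_private *dev_priv,
				    size_t size, struct vmw_bo **out)
{
	struct vmw_bo_params params = {
		.size        = size,			/* rounded up to PAGE_SIZE by vmw_bo_init() */
		.bo_type     = ttm_bo_type_kernel,	/* kernel bos validate uninterruptibly */
		.domain      = VMW_BO_DOMAIN_SYS,	/* assumed domain flag name */
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.pin         = true,			/* ttm_bo_pin() before unreserve */
	};

	return vmw_bo_create(dev_priv, &params, out);
}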
477 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
484 * -EBUSY will be returned if a dontblock operation is requested and the
485 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
494 struct ttm_buffer_object *bo = &vmw_bo->tbo; in vmw_user_bo_synccpu_grab()
500 lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, in vmw_user_bo_synccpu_grab()
504 return -EBUSY; in vmw_user_bo_synccpu_grab()
516 atomic_inc(&vmw_bo->cpu_writers); in vmw_user_bo_synccpu_grab()
526 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
542 atomic_dec(&vmw_bo->cpu_writers); in vmw_user_bo_synccpu_release()
552 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
571 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 in vmw_user_bo_synccpu_ioctl()
572 || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | in vmw_user_bo_synccpu_ioctl()
576 return -EINVAL; in vmw_user_bo_synccpu_ioctl()
579 switch (arg->op) { in vmw_user_bo_synccpu_ioctl()
581 ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo); in vmw_user_bo_synccpu_ioctl()
585 ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); in vmw_user_bo_synccpu_ioctl()
588 if (ret == -ERESTARTSYS || ret == -EBUSY) in vmw_user_bo_synccpu_ioctl()
589 return -EBUSY; in vmw_user_bo_synccpu_ioctl()
591 (unsigned int) arg->handle); in vmw_user_bo_synccpu_ioctl()
597 arg->handle, in vmw_user_bo_synccpu_ioctl()
598 arg->flags); in vmw_user_bo_synccpu_ioctl()
601 (unsigned int) arg->handle); in vmw_user_bo_synccpu_ioctl()
607 return -EINVAL; in vmw_user_bo_synccpu_ioctl()
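From userspace, the grab and release ops bracket direct CPU access to a buffer; note that the ioctl folds -ERESTARTSYS into -EBUSY on the grab path (lines 588-589 above), so callers only need to handle one retry case. A hedged userspace sketch using libdrm follows; the DRM_VMW_SYNCCPU command index, the enum values and the header name are the usual vmwgfx UAPI names but are assumptions as far as this listing goes; only op, flags, handle and the read/write flag names appear above.

#include <stdint.h>
#include <xf86drm.h>
#include <vmwgfx_drm.h>		/* libdrm's vmwgfx UAPI header (assumed) */

/* Hedged sketch: bo_handle is a buffer handle previously returned by the driver. */
static int example_synccpu_access(int fd, uint32_t bo_handle)
{
	struct drm_vmw_synccpu_arg arg = {
		.op     = drm_vmw_synccpu_grab,
		.handle = bo_handle,
		.flags  = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
	};
	int ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));

	if (ret)	/* -EBUSY: busy dontblock grab or an interrupted wait */
		return ret;

	/* ... CPU reads/writes through the buffer's mmap ... */

	arg.op = drm_vmw_synccpu_release;
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}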
614 * vmw_bo_unref_ioctl - Generic handle close ioctl.
630 return drm_gem_handle_delete(file_priv, arg->handle); in vmw_bo_unref_ioctl()
635 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
655 return -ESRCH; in vmw_user_bo_lookup()
664 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
678 struct ttm_device *bdev = bo->bdev; in vmw_bo_fence_single()
685 dma_fence_get(&fence->base); in vmw_bo_fence_single()
687 ret = dma_resv_reserve_fences(bo->base.resv, 1); in vmw_bo_fence_single()
689 dma_resv_add_fence(bo->base.resv, &fence->base, in vmw_bo_fence_single()
693 dma_fence_wait(&fence->base, false); in vmw_bo_fence_single()
694 dma_fence_put(&fence->base); in vmw_bo_fence_single()
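The fragments show vmw_bo_fence_single() taking its own reference on the fence, reserving a fence slot on the reservation object and attaching the fence, or, if the reservation cannot be grown, falling back to a synchronous dma_fence_wait() before dropping that reference. A hedged caller sketch; obtaining the fence and the unreference helper reflect common vmwgfx usage and are assumptions, not part of this listing.

/* Hedged sketch: fence a buffer after queueing device work that touches it. */
static void example_fence_after_cmd(struct ttm_buffer_object *bo,
				    struct vmw_fence_obj *fence)
{
	vmw_bo_fence_single(bo, fence);		/* helper holds its own dma_fence ref */
	if (fence)
		vmw_fence_obj_unreference(&fence);	/* drop the caller's reference */
}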
698 * vmw_bo_swap_notify - swapout notify callback.
705 vmw_bo_unmap(to_vmw_bo(&bo->base)); in vmw_bo_swap_notify()
710 * vmw_bo_move_notify - TTM move_notify_callback
722 struct vmw_bo *vbo = to_vmw_bo(&bo->base); in vmw_bo_move_notify()
729 if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM) in vmw_bo_move_notify()
737 if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) in vmw_bo_move_notify()
815 struct ttm_device *bdev = bo->tbo.bdev; in vmw_bo_placement_set()
817 struct ttm_placement *pl = &bo->placement; in vmw_bo_placement_set()
821 pl->placement = bo->places; in vmw_bo_placement_set()
822 pl->num_placement = set_placement_list(bo->places, domain, busy_domain); in vmw_bo_placement_set()
824 if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) { in vmw_bo_placement_set()
825 for (i = 0; i < pl->num_placement; ++i) { in vmw_bo_placement_set()
826 if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM || in vmw_bo_placement_set()
827 bo->tbo.resource->mem_type == pl->placement[i].mem_type) in vmw_bo_placement_set()
831 drm_warn(&vmw->drm, in vmw_bo_placement_set()
833 "bo->base.resource->mem_type = %u to domain = %u\n", in vmw_bo_placement_set()
834 __func__, bo->tbo.resource->mem_type, domain); in vmw_bo_placement_set()
841 struct ttm_device *bdev = bo->tbo.bdev; in vmw_bo_placement_set_default_accelerated()
845 if (vmw->has_mob) in vmw_bo_placement_set_default_accelerated()
853 xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL); in vmw_bo_add_detached_resource()
858 xa_erase(&vbo->detached_resources, (unsigned long)res); in vmw_bo_del_detached_resource()
866 struct rb_node *rb_itr = vbo->res_tree.rb_node; in vmw_bo_surface()
868 if (vbo->is_dumb && vbo->dumb_surface) { in vmw_bo_surface()
869 res = &vbo->dumb_surface->res; in vmw_bo_surface()
873 xa_for_each(&vbo->detached_resources, index, res) { in vmw_bo_surface()
874 if (res->func->res_type == vmw_res_surface) in vmw_bo_surface()
878 for (rb_itr = rb_first(&vbo->res_tree); rb_itr; in vmw_bo_surface()
881 if (res->func->res_type == vmw_res_surface) in vmw_bo_surface()