Lines Matching +full:mem +full:-base (fragments of drivers/gpu/drm/ttm/ttm_bo_util.c; each entry shows the source line number, the matched line, and the containing function, struct member, argument or local variable)

1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
41 struct ttm_buffer_object base; member
46 struct ttm_resource *mem) in ttm_mem_io_reserve() argument
48 if (mem->bus.offset || mem->bus.addr) in ttm_mem_io_reserve()
51 mem->bus.is_iomem = false; in ttm_mem_io_reserve()
52 if (!bdev->funcs->io_mem_reserve) in ttm_mem_io_reserve()
55 return bdev->funcs->io_mem_reserve(bdev, mem); in ttm_mem_io_reserve()
59 struct ttm_resource *mem) in ttm_mem_io_free() argument
61 if (!mem) in ttm_mem_io_free()
64 if (!mem->bus.offset && !mem->bus.addr) in ttm_mem_io_free()
67 if (bdev->funcs->io_mem_free) in ttm_mem_io_free()
68 bdev->funcs->io_mem_free(bdev, mem); in ttm_mem_io_free()
70 mem->bus.offset = 0; in ttm_mem_io_free()
71 mem->bus.addr = NULL; in ttm_mem_io_free()
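ttm_mem_io_reserve() above only calls into the driver's io_mem_reserve() hook when the resource has no bus address yet, and ttm_mem_io_free() tears that mapping back down. A minimal sketch of such a hook, assuming a hypothetical driver with a single linearly mapped VRAM aperture (struct my_device and vram_base are illustrative names, not part of TTM):

/* Hypothetical io_mem_reserve() hook: my_device and vram_base are
 * illustrative only. The hook fills in mem->bus so that ttm_bo_ioremap()
 * and ttm_bo_vmap() can map the resource later.
 */
static int my_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct my_device *mdev = container_of(bdev, struct my_device, ttm);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		return 0;	/* system memory: nothing to map */
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) + mdev->vram_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
		return 0;
	default:
		return -EINVAL;
	}
}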
75 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
82 * dma-fence if desired.
89 const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops; in ttm_move_memcpy()
90 const struct ttm_kmap_iter_ops *src_ops = src_iter->ops; in ttm_move_memcpy()
95 if (dst_ops->maps_tt && src_ops->maps_tt) in ttm_move_memcpy()
101 dst_ops->map_local(dst_iter, &dst_map, i); in ttm_move_memcpy()
106 if (dst_ops->unmap_local) in ttm_move_memcpy()
107 dst_ops->unmap_local(dst_iter, &dst_map); in ttm_move_memcpy()
113 dst_ops->map_local(dst_iter, &dst_map, i); in ttm_move_memcpy()
114 src_ops->map_local(src_iter, &src_map, i); in ttm_move_memcpy()
118 if (src_ops->unmap_local) in ttm_move_memcpy()
119 src_ops->unmap_local(src_iter, &src_map); in ttm_move_memcpy()
120 if (dst_ops->unmap_local) in ttm_move_memcpy()
121 dst_ops->unmap_local(dst_iter, &dst_map); in ttm_move_memcpy()
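ttm_move_memcpy() only walks the destination and source kmap iterators page by page; building those iterators is the caller's job. A minimal sketch, assuming dst_mem, dst_iter and src_iter were already set up (for example with ttm_kmap_iter_tt_init() or the linear-io initializer used by ttm_bo_move_memcpy() below):

/* Sketch only: dst_mem, dst_iter and src_iter are assumed to be prepared by
 * the caller. Passing clear == true skips reading the source and zero-fills
 * the destination pages instead.
 */
bool clear = false;

ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);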
135 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
136 * and update the (@bo)->mem placement flags. If unsuccessful, the old
146 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_memcpy()
148 ttm_manager_type(bo->bdev, dst_mem->mem_type); in ttm_bo_move_memcpy()
149 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_memcpy()
150 struct ttm_resource *src_mem = bo->resource; in ttm_bo_move_memcpy()
161 return -EINVAL; in ttm_bo_move_memcpy()
163 src_man = ttm_manager_type(bdev, src_mem->mem_type); in ttm_bo_move_memcpy()
164 if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || in ttm_bo_move_memcpy()
165 dst_man->use_tt)) { in ttm_bo_move_memcpy()
172 if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt) in ttm_bo_move_memcpy()
173 dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm); in ttm_bo_move_memcpy()
178 if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt) in ttm_bo_move_memcpy()
179 src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm); in ttm_bo_move_memcpy()
185 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); in ttm_bo_move_memcpy()
186 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) in ttm_bo_move_memcpy()
187 ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter); in ttm_bo_move_memcpy()
189 if (!src_iter->ops->maps_tt) in ttm_bo_move_memcpy()
194 if (!dst_iter->ops->maps_tt) in ttm_bo_move_memcpy()
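ttm_bo_move_memcpy() is the generic CPU fallback for drivers whose hardware copy path is unavailable for a particular move. A hedged sketch of a driver's move() callback using it (my_bo_move() and my_copy_with_engine() are hypothetical driver functions, not TTM helpers):

/* Hypothetical move() callback: my_copy_with_engine() stands in for a
 * driver's hardware copy path and is not a real TTM helper.
 */
static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	int ret;

	ret = my_copy_with_engine(bo, ctx, new_mem);
	if (ret == -ENODEV)	/* no copy engine available: fall back to CPU */
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);

	return ret;
}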
205 fbo = container_of(bo, struct ttm_transfer_obj, base); in ttm_transfered_destroy()
206 dma_resv_fini(&fbo->base.base._resv); in ttm_transfered_destroy()
207 ttm_bo_put(fbo->bo); in ttm_transfered_destroy()
234 return -ENOMEM; in ttm_buffer_object_transfer()
236 fbo->base = *bo; in ttm_buffer_object_transfer()
244 drm_vma_node_reset(&fbo->base.base.vma_node); in ttm_buffer_object_transfer()
246 kref_init(&fbo->base.kref); in ttm_buffer_object_transfer()
247 fbo->base.destroy = &ttm_transfered_destroy; in ttm_buffer_object_transfer()
248 fbo->base.pin_count = 0; in ttm_buffer_object_transfer()
249 if (bo->type != ttm_bo_type_sg) in ttm_buffer_object_transfer()
250 fbo->base.base.resv = &fbo->base.base._resv; in ttm_buffer_object_transfer()
252 dma_resv_init(&fbo->base.base._resv); in ttm_buffer_object_transfer()
253 fbo->base.base.dev = NULL; in ttm_buffer_object_transfer()
254 ret = dma_resv_trylock(&fbo->base.base._resv); in ttm_buffer_object_transfer()
257 if (fbo->base.resource) { in ttm_buffer_object_transfer()
258 ttm_resource_set_bo(fbo->base.resource, &fbo->base); in ttm_buffer_object_transfer()
259 bo->resource = NULL; in ttm_buffer_object_transfer()
260 ttm_bo_set_bulk_move(&fbo->base, NULL); in ttm_buffer_object_transfer()
262 fbo->base.bulk_move = NULL; in ttm_buffer_object_transfer()
265 ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); in ttm_buffer_object_transfer()
272 fbo->bo = bo; in ttm_buffer_object_transfer()
274 ttm_bo_move_to_lru_tail_unlocked(&fbo->base); in ttm_buffer_object_transfer()
276 *new_obj = &fbo->base; in ttm_buffer_object_transfer()
296 man = ttm_manager_type(bo->bdev, res->mem_type); in ttm_io_prot()
297 if (man->use_tt) { in ttm_io_prot()
298 caching = bo->ttm->caching; in ttm_io_prot()
299 if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED) in ttm_io_prot()
302 caching = res->bus.caching; in ttm_io_prot()
314 struct ttm_resource *mem = bo->resource; in ttm_bo_ioremap() local
316 if (bo->resource->bus.addr) { in ttm_bo_ioremap()
317 map->bo_kmap_type = ttm_bo_map_premapped; in ttm_bo_ioremap()
318 map->virtual = ((u8 *)bo->resource->bus.addr) + offset; in ttm_bo_ioremap()
320 resource_size_t res = bo->resource->bus.offset + offset; in ttm_bo_ioremap()
322 map->bo_kmap_type = ttm_bo_map_iomap; in ttm_bo_ioremap()
323 if (mem->bus.caching == ttm_write_combined) in ttm_bo_ioremap()
324 map->virtual = ioremap_wc(res, size); in ttm_bo_ioremap()
326 else if (mem->bus.caching == ttm_cached) in ttm_bo_ioremap()
327 map->virtual = ioremap_cache(res, size); in ttm_bo_ioremap()
330 map->virtual = ioremap(res, size); in ttm_bo_ioremap()
332 return (!map->virtual) ? -ENOMEM : 0; in ttm_bo_ioremap()
340 struct ttm_resource *mem = bo->resource; in ttm_bo_kmap_ttm() local
345 struct ttm_tt *ttm = bo->ttm; in ttm_bo_kmap_ttm()
347 ttm_manager_type(bo->bdev, bo->resource->mem_type); in ttm_bo_kmap_ttm()
353 ret = ttm_tt_populate(bo->bdev, ttm, &ctx); in ttm_bo_kmap_ttm()
357 if (num_pages == 1 && ttm->caching == ttm_cached && in ttm_bo_kmap_ttm()
358 !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) { in ttm_bo_kmap_ttm()
364 map->bo_kmap_type = ttm_bo_map_kmap; in ttm_bo_kmap_ttm()
365 map->page = ttm->pages[start_page]; in ttm_bo_kmap_ttm()
366 map->virtual = kmap(map->page); in ttm_bo_kmap_ttm()
372 prot = ttm_io_prot(bo, mem, PAGE_KERNEL); in ttm_bo_kmap_ttm()
373 map->bo_kmap_type = ttm_bo_map_vmap; in ttm_bo_kmap_ttm()
374 map->virtual = vmap(ttm->pages + start_page, num_pages, in ttm_bo_kmap_ttm()
377 return (!map->virtual) ? -ENOMEM : 0; in ttm_bo_kmap_ttm()
393 * -ENOMEM: Out of memory.
394 * -EINVAL: Invalid range.
403 map->virtual = NULL; in ttm_bo_kmap()
404 map->bo = bo; in ttm_bo_kmap()
405 if (num_pages > PFN_UP(bo->resource->size)) in ttm_bo_kmap()
406 return -EINVAL; in ttm_bo_kmap()
407 if ((start_page + num_pages) > PFN_UP(bo->resource->size)) in ttm_bo_kmap()
408 return -EINVAL; in ttm_bo_kmap()
410 ret = ttm_mem_io_reserve(bo->bdev, bo->resource); in ttm_bo_kmap()
413 if (!bo->resource->bus.is_iomem) { in ttm_bo_kmap()
432 if (!map->virtual) in ttm_bo_kunmap()
434 switch (map->bo_kmap_type) { in ttm_bo_kunmap()
436 iounmap(map->virtual); in ttm_bo_kunmap()
439 vunmap(map->virtual); in ttm_bo_kunmap()
442 kunmap(map->page); in ttm_bo_kunmap()
449 ttm_mem_io_free(map->bo->bdev, map->bo->resource); in ttm_bo_kunmap()
450 map->virtual = NULL; in ttm_bo_kunmap()
451 map->page = NULL; in ttm_bo_kunmap()
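ttm_bo_kmap() checks the requested page range against the resource size and then maps through either the ioremap path or the ttm page path; ttm_bo_kunmap() undoes whichever mapping type was recorded in the ttm_bo_kmap_obj. A minimal usage sketch, assuming the caller already holds the BO's reservation lock and simply wants to clear the first page:

/* Sketch: map the first page of a reserved BO, clear it, and unmap.
 * ttm_kmap_obj_virtual() reports whether the returned address is iomem.
 */
struct ttm_bo_kmap_obj map;
bool is_iomem;
void *virt;
int ret;

ret = ttm_bo_kmap(bo, 0, 1, &map);
if (ret)
	return ret;

virt = ttm_kmap_obj_virtual(&map, &is_iomem);
if (is_iomem)
	memset_io((void __iomem *)virt, 0, PAGE_SIZE);
else
	memset(virt, 0, PAGE_SIZE);

ttm_bo_kunmap(&map);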
466 * -ENOMEM: Out of memory.
467 * -EINVAL: Invalid range.
471 struct ttm_resource *mem = bo->resource; in ttm_bo_vmap() local
474 dma_resv_assert_held(bo->base.resv); in ttm_bo_vmap()
476 ret = ttm_mem_io_reserve(bo->bdev, mem); in ttm_bo_vmap()
480 if (mem->bus.is_iomem) { in ttm_bo_vmap()
483 if (mem->bus.addr) in ttm_bo_vmap()
484 vaddr_iomem = (void __iomem *)mem->bus.addr; in ttm_bo_vmap()
485 else if (mem->bus.caching == ttm_write_combined) in ttm_bo_vmap()
486 vaddr_iomem = ioremap_wc(mem->bus.offset, in ttm_bo_vmap()
487 bo->base.size); in ttm_bo_vmap()
489 else if (mem->bus.caching == ttm_cached) in ttm_bo_vmap()
490 vaddr_iomem = ioremap_cache(mem->bus.offset, in ttm_bo_vmap()
491 bo->base.size); in ttm_bo_vmap()
494 vaddr_iomem = ioremap(mem->bus.offset, bo->base.size); in ttm_bo_vmap()
497 return -ENOMEM; in ttm_bo_vmap()
506 struct ttm_tt *ttm = bo->ttm; in ttm_bo_vmap()
510 ret = ttm_tt_populate(bo->bdev, ttm, &ctx); in ttm_bo_vmap()
518 prot = ttm_io_prot(bo, mem, PAGE_KERNEL); in ttm_bo_vmap()
519 vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot); in ttm_bo_vmap()
521 return -ENOMEM; in ttm_bo_vmap()
540 struct ttm_resource *mem = bo->resource; in ttm_bo_vunmap() local
542 dma_resv_assert_held(bo->base.resv); in ttm_bo_vunmap()
547 if (!map->is_iomem) in ttm_bo_vunmap()
548 vunmap(map->vaddr); in ttm_bo_vunmap()
549 else if (!mem->bus.addr) in ttm_bo_vunmap()
550 iounmap(map->vaddr_iomem); in ttm_bo_vunmap()
553 ttm_mem_io_free(bo->bdev, bo->resource); in ttm_bo_vunmap()
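ttm_bo_vmap() hands the mapping back in a struct iosys_map, which records whether the address is I/O or system memory, so callers can use the iosys_map helpers instead of branching on mem->bus.is_iomem themselves. A minimal sketch, assuming the reservation lock is held as the dma_resv_assert_held() above requires:

/* Sketch: vmap a whole BO, clear it through the iosys_map helpers, then
 * unmap again. Assumes bo->base.resv is held by the caller.
 */
struct iosys_map map;
int ret;

ret = ttm_bo_vmap(bo, &map);
if (ret)
	return ret;

iosys_map_memset(&map, 0, 0, bo->base.size);
ttm_bo_vunmap(bo, &map);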
562 ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, in ttm_bo_wait_free_node()
565 return -EBUSY; in ttm_bo_wait_free_node()
571 ttm_resource_free(bo, &bo->resource); in ttm_bo_wait_free_node()
594 dma_resv_add_fence(&ghost_obj->base._resv, fence, in ttm_bo_move_to_ghost()
604 ghost_obj->ttm = NULL; in ttm_bo_move_to_ghost()
606 bo->ttm = NULL; in ttm_bo_move_to_ghost()
608 dma_resv_unlock(&ghost_obj->base._resv); in ttm_bo_move_to_ghost()
616 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_pipeline_evict()
619 from = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_move_pipeline_evict()
625 spin_lock(&from->move_lock); in ttm_bo_move_pipeline_evict()
626 if (!from->move || dma_fence_is_later(fence, from->move)) { in ttm_bo_move_pipeline_evict()
627 dma_fence_put(from->move); in ttm_bo_move_pipeline_evict()
628 from->move = dma_fence_get(fence); in ttm_bo_move_pipeline_evict()
630 spin_unlock(&from->move_lock); in ttm_bo_move_pipeline_evict()
632 ttm_resource_free(bo, &bo->resource); in ttm_bo_move_pipeline_evict()
636 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
657 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_accel_cleanup()
658 struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type); in ttm_bo_move_accel_cleanup()
659 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); in ttm_bo_move_accel_cleanup()
662 dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); in ttm_bo_move_accel_cleanup()
664 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); in ttm_bo_move_accel_cleanup()
665 else if (!from->use_tt && pipeline) in ttm_bo_move_accel_cleanup()
668 ret = ttm_bo_wait_free_node(bo, man->use_tt); in ttm_bo_move_accel_cleanup()
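ttm_bo_move_accel_cleanup() is meant to be called right after a driver has queued an accelerated copy and obtained its fence: it adds the fence to the BO's reservation object and then either pipelines the cleanup through a ghost object or waits, depending on the evict and pipeline arguments. A hedged sketch from inside a hypothetical move() callback (my_copy_submit() is a made-up stand-in for the driver's copy-engine submission; bo, evict and new_mem come from the callback arguments):

/* Hypothetical accelerated move: my_copy_submit() stands in for the driver's
 * copy-engine submission and returns the fence signalling completion.
 */
struct dma_fence *fence;
int ret;

fence = my_copy_submit(bo, bo->resource, new_mem);
if (IS_ERR(fence))
	return PTR_ERR(fence);

ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true /* pipeline */, new_mem);
dma_fence_put(fence);
return ret;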
680 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
691 struct ttm_device *bdev = bo->bdev; in ttm_bo_move_sync_cleanup()
692 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); in ttm_bo_move_sync_cleanup()
695 ret = ttm_bo_wait_free_node(bo, man->use_tt); in ttm_bo_move_sync_cleanup()
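ttm_bo_move_sync_cleanup() is the non-pipelined variant: it waits for the fences on the old node and then switches the BO over to new_mem. A minimal sketch for a driver that performs its copies synchronously (my_copy_synchronously() is hypothetical):

/* Sketch of a non-pipelined move: the copy has already completed (or its
 * fence was added to bo's reservation object), so TTM only needs to wait
 * and then assign new_mem to the BO.
 */
my_copy_synchronously(bo, new_mem);	/* hypothetical driver helper */
ttm_bo_move_sync_cleanup(bo, new_mem);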
704 * ttm_bo_pipeline_gutting - purge the contents of a bo
721 if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) { in ttm_bo_pipeline_gutting()
722 if (!bo->ttm) { in ttm_bo_pipeline_gutting()
728 ttm_tt_unpopulate(bo->bdev, bo->ttm); in ttm_bo_pipeline_gutting()
729 if (bo->type == ttm_bo_type_device) in ttm_bo_pipeline_gutting()
730 ttm_tt_mark_for_clear(bo->ttm); in ttm_bo_pipeline_gutting()
732 ttm_resource_free(bo, &bo->resource); in ttm_bo_pipeline_gutting()
741 * to avoid leaking sensitive information to user-space. in ttm_bo_pipeline_gutting()
744 ttm = bo->ttm; in ttm_bo_pipeline_gutting()
745 bo->ttm = NULL; in ttm_bo_pipeline_gutting()
747 swap(bo->ttm, ttm); in ttm_bo_pipeline_gutting()
755 ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv); in ttm_bo_pipeline_gutting()
758 dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, in ttm_bo_pipeline_gutting()
762 dma_resv_unlock(&ghost->base._resv); in ttm_bo_pipeline_gutting()
764 bo->ttm = ttm; in ttm_bo_pipeline_gutting()
768 ttm_tt_destroy(bo->bdev, ttm); in ttm_bo_pipeline_gutting()
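ttm_bo_pipeline_gutting() drops a BO's backing store; if the BO is still busy, the old pages are handed to a ghost object so the purge does not have to block. In the TTM core this path is reached from ttm_bo_validate() when it is given an empty placement list, which is also how a purge is normally requested. A hedged sketch (assuming current ttm_bo_validate() behaviour; the context flags are illustrative):

/* Sketch: validating against an empty placement list makes ttm_bo_validate()
 * call ttm_bo_pipeline_gutting() internally, purging the BO's contents.
 */
struct ttm_operation_ctx ctx = { .interruptible = true };
struct ttm_placement placement = { .num_placement = 0 };
int ret;

ret = ttm_bo_validate(bo, &placement, &ctx);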
776 struct ttm_operation_ctx *ctx = walk->ctx; in ttm_lru_walk_trylock()
780 if (dma_resv_trylock(bo->base.resv)) { in ttm_lru_walk_trylock()
785 if (bo->base.resv == ctx->resv && ctx->allow_res_evict) { in ttm_lru_walk_trylock()
786 dma_resv_assert_held(bo->base.resv); in ttm_lru_walk_trylock()
797 struct dma_resv *resv = bo->base.resv; in ttm_lru_walk_ticketlock()
800 if (walk->ctx->interruptible) in ttm_lru_walk_ticketlock()
801 ret = dma_resv_lock_interruptible(resv, walk->ticket); in ttm_lru_walk_ticketlock()
803 ret = dma_resv_lock(resv, walk->ticket); in ttm_lru_walk_ticketlock()
809 * to return -EDEADLK causing the eviction to fail, so in ttm_lru_walk_ticketlock()
813 walk->ticket = NULL; in ttm_lru_walk_ticketlock()
814 } else if (ret == -EDEADLK) { in ttm_lru_walk_ticketlock()
816 ret = -ENOSPC; in ttm_lru_walk_ticketlock()
825 dma_resv_unlock(bo->base.resv); in ttm_lru_walk_unlock()
829 * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
842 * With a locked object, the actions indicated by @walk->process_bo are
847 * Typically @walk->process_bo() would return the number of pages evicted,
867 spin_lock(&bdev->lru_lock); in ttm_lru_walk_for_evict()
869 struct ttm_buffer_object *bo = res->bo; in ttm_lru_walk_for_evict()
881 else if (!walk->ticket || walk->ctx->no_wait_gpu || in ttm_lru_walk_for_evict()
882 walk->trylock_only) in ttm_lru_walk_for_evict()
890 mem_type = res->mem_type; in ttm_lru_walk_for_evict()
891 spin_unlock(&bdev->lru_lock); in ttm_lru_walk_for_evict()
904 if (!lret && bo->resource && bo->resource->mem_type == mem_type) in ttm_lru_walk_for_evict()
905 lret = walk->ops->process_bo(walk, bo); in ttm_lru_walk_for_evict()
909 if (lret == -EBUSY || lret == -EALREADY) in ttm_lru_walk_for_evict()
913 spin_lock(&bdev->lru_lock); in ttm_lru_walk_for_evict()
918 spin_unlock(&bdev->lru_lock); in ttm_lru_walk_for_evict()
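ttm_lru_walk_for_evict() drives the loop above: it walks the manager's LRU, locks each candidate BO (trylock, or a ticketed sleeping lock when a ticket is provided), and calls walk->ops->process_bo() on it until the accumulated return values reach the target. A hedged sketch of a caller, assuming the ttm_lru_walk and ttm_lru_walk_ops layout dereferenced in this file (walk->ctx, walk->ticket, walk->trylock_only, walk->ops->process_bo); my_shrink_bo() is a hypothetical per-BO handler:

/* Hypothetical eviction walk: my_shrink_bo() stands in for the per-BO work
 * and must return the number of pages freed or a negative error, matching
 * the convention described in the kernel-doc above.
 */
static s64 my_process_bo(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	return my_shrink_bo(walk->ctx, bo);
}

static const struct ttm_lru_walk_ops my_walk_ops = {
	.process_bo = my_process_bo,
};

static s64 my_evict_pages(struct ttm_device *bdev, struct ttm_resource_manager *man,
			  struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &my_walk_ops,
		.ctx = ctx,
		/* no ticket: trylock (or plain blocking lock) only */
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}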