| /linux/drivers/gpu/drm/qxl/ |
| H A D | qxl_object.c |
    32    static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)  in qxl_ttm_bo_destroy() argument
    37    bo = to_qxl_bo(tbo);  in qxl_ttm_bo_destroy()
    38    qdev = to_qxl(bo->tbo.base.dev);  in qxl_ttm_bo_destroy()
    45    drm_gem_object_release(&bo->tbo.base);  in qxl_ttm_bo_destroy()
    62    if (qbo->tbo.base.size <= PAGE_SIZE)  in qxl_ttm_placement_from_domain()
    123   r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);  in qxl_bo_create()
    128   bo->tbo.base.funcs = &qxl_object_funcs;  in qxl_bo_create()
    138   bo->tbo.priority = priority;  in qxl_bo_create()
    139   r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,  in qxl_bo_create()
    150   ttm_bo_pin(&bo->tbo);  in qxl_bo_create()
    [all …]
|
| H A D | qxl_object.h |
    34    r = ttm_bo_reserve(&bo->tbo, true, false, NULL);  in qxl_bo_reserve()
    37    struct drm_device *ddev = bo->tbo.base.dev;  in qxl_bo_reserve()
    48    ttm_bo_unreserve(&bo->tbo);  in qxl_bo_unreserve()
    53    return bo->tbo.base.size;  in qxl_bo_size()
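The qxl_object.h hits are thin wrappers around TTM's reservation (locking) API, all operating on the embedded ttm_buffer_object. A minimal sketch of the same pattern, assuming a hypothetical wrapper struct foo_bo (header paths follow current kernels and may differ on older trees):

#include <drm/ttm/ttm_bo.h>

/* hypothetical driver BO wrapping the TTM object, like qxl_bo above */
struct foo_bo {
	struct ttm_buffer_object tbo;
};

/* interruptible reserve (lock) of the underlying TTM BO, no ww ticket */
static inline int foo_bo_reserve(struct foo_bo *bo)
{
	return ttm_bo_reserve(&bo->tbo, true, false, NULL);
}

static inline void foo_bo_unreserve(struct foo_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

/* the object size lives in the embedded GEM object, as qxl_bo_size() shows */
static inline size_t foo_bo_size(struct foo_bo *bo)
{
	return bo->tbo.base.size;
}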
|
| H A D | qxl_drv.h |
    73    struct ttm_buffer_object tbo;  member
    94    #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
    95    #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
    284   (bo->tbo.resource->mem_type == TTM_PL_VRAM)  in qxl_bo_physical_address()
    289   return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);  in qxl_bo_physical_address()
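The qxl_drv.h hits show the layering these drivers rely on: the driver BO embeds a ttm_buffer_object, which itself embeds a drm_gem_object as "base", so one allocation can be reached from either subsystem's handle via container_of(). A sketch of that shape with a hypothetical foo_bo (not qxl's actual struct):

#include <linux/container_of.h>
#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo.h>

struct foo_bo {
	struct ttm_buffer_object tbo;	/* tbo.base is the DRM GEM object */
	/* driver-private state would follow here */
};

/* convert a GEM handle back to the driver BO */
#define gem_to_foo_bo(gobj) container_of((gobj), struct foo_bo, tbo.base)

/* convert a TTM handle back to the driver BO */
#define ttm_to_foo_bo(tobj) container_of((tobj), struct foo_bo, tbo)

The qxl_bo_physical_address() hit then only needs tbo.resource->start << PAGE_SHIFT because the TTM resource tracks the BO's current placement in pages.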
|
| /linux/drivers/gpu/drm/radeon/ |
| H A D | radeon_object.c |
    52    static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)  in radeon_ttm_bo_destroy() argument
    56    bo = container_of(tbo, struct radeon_bo, tbo);  in radeon_ttm_bo_destroy()
    63    if (bo->tbo.base.import_attach)  in radeon_ttm_bo_destroy()
    64    drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);  in radeon_ttm_bo_destroy()
    65    drm_gem_object_release(&bo->tbo.base);  in radeon_ttm_bo_destroy()
    153   drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);  in radeon_bo_create()
    154   bo->tbo.base.funcs = &radeon_gem_object_funcs;  in radeon_bo_create()
    204   r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,  in radeon_bo_create()
    223   r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,  in radeon_bo_kmap()
    234   r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);  in radeon_bo_kmap()
    [all …]
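radeon_bo_kmap() above waits for kernel fences before mapping the backing store. A hedged sketch of that sequence, reusing the hypothetical struct foo_bo from the earlier sketch (the helper name is made up; the TTM and dma-resv calls are the ones visible in the hits):

#include <linux/dma-resv.h>
#include <linux/pfn.h>
#include <linux/sched.h>
#include <drm/ttm/ttm_bo.h>

static int foo_bo_kmap(struct foo_bo *bo, struct ttm_bo_kmap_obj *map, void **ptr)
{
	bool is_iomem;
	long lret;
	int ret;

	/* wait for kernel (implicit-sync) fences before touching the pages */
	lret = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (lret < 0)
		return lret;

	/* map every page of the object; the size comes from the GEM base */
	ret = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), map);
	if (ret)
		return ret;

	*ptr = ttm_kmap_obj_virtual(map, &is_iomem);
	return 0;
}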
|
| H A D | radeon_object.h |
    68    r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);  in radeon_bo_reserve()
    79    ttm_bo_unreserve(&bo->tbo);  in radeon_bo_unreserve()
    96    rdev = radeon_get_rdev(bo->tbo.bdev);  in radeon_bo_gpu_offset()
    98    switch (bo->tbo.resource->mem_type) {  in radeon_bo_gpu_offset()
    107   return (bo->tbo.resource->start << PAGE_SHIFT) + start;  in radeon_bo_gpu_offset()
    112   return bo->tbo.base.size;  in radeon_bo_size()
    117   return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;  in radeon_bo_ngpu_pages()
    122   return (bo->tbo.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;  in radeon_bo_gpu_page_alignment()
    133   return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);  in radeon_bo_mmap_offset()
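radeon_bo_gpu_offset() above turns the TTM resource into a GPU address: resource->start is a page number, and the aperture base depends on which placement the BO currently occupies. A sketch of the same idea (the aperture arguments are hypothetical; radeon reads them from its device struct):

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

static u64 foo_bo_gpu_offset(struct foo_bo *bo, u64 gtt_base, u64 vram_base)
{
	u64 start = (u64)bo->tbo.resource->start << PAGE_SHIFT;

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		return gtt_base + start;	/* GART aperture */
	case TTM_PL_VRAM:
		return vram_base + start;	/* VRAM aperture */
	default:
		return 0;			/* not GPU-visible in this sketch */
	}
}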
|
| H A D | radeon_mn.c |
    57    if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))  in radeon_mn_invalidate()
    69    r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,  in radeon_mn_invalidate()
    75    r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in radeon_mn_invalidate()
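radeon_mn_invalidate() shows the usual eviction recipe for userptr BOs: wait out every fence on the reservation object, then revalidate the BO so TTM moves or unbinds it. A hedged sketch of just that core, with the placement assumed to have been switched to a CPU-accessible domain by the caller (as radeon does before the call):

#include <linux/dma-resv.h>
#include <linux/sched.h>
#include <drm/ttm/ttm_bo.h>

static int foo_bo_evict_for_invalidate(struct foo_bo *bo,
				       struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = { false, false };
	long lret;

	/* BOOKKEEP waits for every fence, not just implicit-sync ones */
	lret = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (lret < 0)
		return lret;

	/* move/rebind the BO according to the (already updated) placement */
	return ttm_bo_validate(&bo->tbo, placement, &ctx);
}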
|
| H A D | radeon_prime.c |
    41    return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,  in radeon_gem_prime_get_sg_table()
    42    bo->tbo.ttm->num_pages);  in radeon_gem_prime_get_sg_table()
    61    bo->tbo.base.funcs = &radeon_gem_object_funcs;  in radeon_gem_prime_import_sg_table()
    68    return &bo->tbo.base;  in radeon_gem_prime_import_sg_table()
    98    if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))  in radeon_gem_prime_export()
|
| H A D | radeon_benchmark.c |
    125   dobj->tbo.base.resv);  in radeon_benchmark_move()
    136   dobj->tbo.base.resv);  in radeon_benchmark_move()
|
| H A D | radeon_vm.c |
    399   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in radeon_vm_clear_bo()
    702   radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);  in radeon_vm_update_page_directory()
    831   radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);  in radeon_vm_update_ptes()
    832   r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);  in radeon_vm_update_ptes()
    942   if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm))  in radeon_vm_bo_update()
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| H A D | amdgpu_dma_buf.c |
    63    return amdgpu_ttm_adev(bo->tbo.bdev);  in dma_buf_attach_adev()
    83    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_dma_buf_attach()
    114   r = dma_resv_lock(bo->tbo.base.resv, NULL);  in amdgpu_dma_buf_attach()
    120   dma_resv_unlock(bo->tbo.base.resv);  in amdgpu_dma_buf_attach()
    199   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_dma_buf_map()
    203   if (!bo->tbo.pin_count) {  in amdgpu_dma_buf_map()
    214   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_dma_buf_map()
    219   switch (bo->tbo.resource->mem_type) {  in amdgpu_dma_buf_map()
    222   bo->tbo.ttm->pages,  in amdgpu_dma_buf_map()
    223   bo->tbo.ttm->num_pages);  in amdgpu_dma_buf_map()
    [all …]
|
| H A D | amdgpu_amdkfd_gpuvm.c |
    311   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_amdkfd_release_notify()
    346   ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,  in create_dmamap_sg_bo()
    348   ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);  in create_dmamap_sg_bo()
    383   dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,  in amdgpu_amdkfd_remove_eviction_fence()
    400   struct dma_resv *resv = &bo->tbo.base._resv;  in amdgpu_amdkfd_remove_all_eviction_fences()
    423   if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),  in amdgpu_amdkfd_bo_validate()
    428   if (bo->tbo.pin_count)  in amdgpu_amdkfd_bo_validate()
    433   ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_amdkfd_bo_validate()
    456   ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);  in amdgpu_amdkfd_bo_validate_and_fence()
    460   dma_resv_add_fence(bo->tbo.base.resv, fence,  in amdgpu_amdkfd_bo_validate_and_fence()
    [all …]
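The amdgpu_amdkfd_bo_validate_and_fence() hits end with the two-step dma-resv idiom: reserve a fence slot first (which can fail with -ENOMEM), then publish the fence. A minimal sketch, assuming the caller already holds the reservation lock and supplies the fence; the usage level chosen here is an assumption, not what amdgpu necessarily uses:

#include <linux/dma-resv.h>

static int foo_bo_fence(struct foo_bo *bo, struct dma_fence *fence)
{
	int ret;

	dma_resv_assert_held(bo->tbo.base.resv);

	/* make room for one more fence before adding it */
	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	dma_resv_add_fence(bo->tbo.base.resv, fence, DMA_RESV_USAGE_BOOKKEEP);
	return 0;
}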
|
| H A D | amdgpu_vm.c |
    138   dma_resv_assert_held(vm->root.bo->tbo.base.resv);  in amdgpu_vm_assert_locked()
    157   if (bo->tbo.type == ttm_bo_type_kernel)  in amdgpu_vm_bo_evicted()
    285   if (!bo || bo->tbo.type != ttm_bo_type_kernel)  in amdgpu_vm_bo_reset_state_machine()
    309   dma_resv_assert_held(bo->tbo.base.resv);  in amdgpu_vm_update_shared()
    311   shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);  in amdgpu_vm_update_shared()
    423   base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);  in amdgpu_vm_bo_base_init()
    424   amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);  in amdgpu_vm_bo_base_init()
    430   dma_resv_assert_held(vm->root.bo->tbo.base.resv);  in amdgpu_vm_bo_base_init()
    432   ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);  in amdgpu_vm_bo_base_init()
    433   if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)  in amdgpu_vm_bo_base_init()
    [all …]
|
| H A D | amdgpu_cs.c |
    144   if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))  in amdgpu_cs_p1_user_fence()
    802   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_cs_bo_validate()
    807   .resv = bo->tbo.base.resv  in amdgpu_cs_bo_validate()
    812   if (bo->tbo.pin_count)  in amdgpu_cs_bo_validate()
    819   (!bo->tbo.base.dma_buf ||  in amdgpu_cs_bo_validate()
    820   list_empty(&bo->tbo.base.dma_buf->attachments))) {  in amdgpu_cs_bo_validate()
    840   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_cs_bo_validate()
    844   amdgpu_res_cpu_visible(adev, bo->tbo.resource))  in amdgpu_cs_bo_validate()
    902   for (i = 0; i < bo->tbo.ttm->num_pages; i++) {  in amdgpu_cs_parser_bos()
    903   if (bo->tbo.ttm->pages[i] !=  in amdgpu_cs_parser_bos()
    [all …]
|
| H A D | amdgpu_vram_mgr.c |
    279   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_vram_mgr_bo_visible_size()
    280   struct ttm_resource *res = bo->tbo.resource;  in amdgpu_vram_mgr_bo_visible_size()
    442   struct ttm_buffer_object *tbo,  in amdgpu_vram_mgr_new() argument
    448   struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);  in amdgpu_vram_mgr_new()
    465   if (tbo->type != ttm_bo_type_kernel)  in amdgpu_vram_mgr_new()
    478   tbo->page_alignment);  in amdgpu_vram_mgr_new()
    485   ttm_resource_init(tbo, place, &vres->base);  in amdgpu_vram_mgr_new()
    524   if (tbo->page_alignment)  in amdgpu_vram_mgr_new()
    525   min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;  in amdgpu_vram_mgr_new()
    550   tbo->page_alignment);  in amdgpu_vram_mgr_new()
|
| H A D | amdgpu_ttm.c |
    330    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))  in amdgpu_ttm_copy_mem_to_mem()
    706    struct ttm_tt *ttm = bo->tbo.ttm;  in amdgpu_ttm_tt_get_user_pages()
    865    struct ttm_buffer_object *tbo,  in amdgpu_ttm_gart_bind() argument
    868    struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);  in amdgpu_ttm_gart_bind()
    869    struct ttm_tt *ttm = tbo->ttm;  in amdgpu_ttm_gart_bind()
    1007   void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)  in amdgpu_ttm_recover_gart() argument
    1009   struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);  in amdgpu_ttm_recover_gart()
    1012   if (!tbo->ttm)  in amdgpu_ttm_recover_gart()
    1015   flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);  in amdgpu_ttm_recover_gart()
    1016   amdgpu_ttm_gart_bind(adev, tbo, flags);  in amdgpu_ttm_recover_gart()
    [all …]
|
| H A D | amdgpu_csa.c |
    76    r = drm_exec_lock_obj(&exec, &bo->tbo.base);  in amdgpu_map_static_csa()
    116   r = drm_exec_lock_obj(&exec, &bo->tbo.base);  in amdgpu_unmap_static_csa()
|
| H A D | amdgpu_eviction_fence.c |
    201   struct dma_resv *resv = bo->tbo.base.resv;  in amdgpu_eviction_fence_attach()
    227   dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,  in amdgpu_eviction_fence_detach()
|
| H A D | amdgpu_vm_pt.c |
    383   r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_vm_pt_clear()
    476   bp.resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_pt_create()
    542   amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);  in amdgpu_vm_pt_free()
    544   ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);  in amdgpu_vm_pt_free()
|
| H A D | amdgpu_gmc.c |
    114   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_gmc_get_pde_for_bo()
    116   switch (bo->tbo.resource->mem_type) {  in amdgpu_gmc_get_pde_for_bo()
    118   *addr = bo->tbo.ttm->dma_address[0];  in amdgpu_gmc_get_pde_for_bo()
    127   *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);  in amdgpu_gmc_get_pde_for_bo()
    136   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_gmc_pd_addr()
|
| /linux/drivers/gpu/drm/vmwgfx/ |
| H A D | vmwgfx_cursor_plane.c |
    158   ttm_bo_unpin(&(*vbo)->tbo);  in vmw_cursor_mob_destroy()
    179   ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);  in vmw_cursor_mob_unmap()
    182   ttm_bo_unreserve(&vbo->tbo);  in vmw_cursor_mob_unmap()
    209   if (vcp->cursor_mobs[i]->tbo.base.size <  in vmw_cursor_mob_put()
    210   vps->cursor.mob->tbo.base.size) {  in vmw_cursor_mob_put()
    245   if (vps->cursor.mob->tbo.base.size >= size)  in vmw_cursor_mob_get()
    253   vcp->cursor_mobs[i]->tbo.base.size >= size) {  in vmw_cursor_mob_get()
    267   ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);  in vmw_cursor_mob_get()
    273   ttm_bo_unreserve(&vps->cursor.mob->tbo);  in vmw_cursor_mob_get()
    280   ttm_bo_unreserve(&vps->cursor.mob->tbo);  in vmw_cursor_mob_get()
    [all …]
|
| H A D | vmwgfx_page_dirty.c |
    71    pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);  in vmw_bo_dirty_scan_pagetable()
    72    struct address_space *mapping = vbo->tbo.bdev->dev_mapping;  in vmw_bo_dirty_scan_pagetable()
    109   unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);  in vmw_bo_dirty_scan_mkwrite()
    110   struct address_space *mapping = vbo->tbo.bdev->dev_mapping;  in vmw_bo_dirty_scan_mkwrite()
    116   num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,  in vmw_bo_dirty_scan_mkwrite()
    174   unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);  in vmw_bo_dirty_pre_unmap()
    175   struct address_space *mapping = vbo->tbo.bdev->dev_mapping;  in vmw_bo_dirty_pre_unmap()
    198   unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);  in vmw_bo_dirty_unmap()
    199   struct address_space *mapping = vbo->tbo.bdev->dev_mapping;  in vmw_bo_dirty_unmap()
    219   pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);  in vmw_bo_dirty_add()
    [all …]
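vmwgfx's dirty tracking keys everything off the BO's mmap window: drm_vma_node_start() gives the page offset of the BO inside the device file, and write-protecting that range in the device's address_space forces the next CPU write back through the mkwrite handler, where pages can be marked dirty. A sketch under those assumptions (the helper name is hypothetical; wp_shared_mapping_range() is the core-mm helper visible in the hits):

#include <linux/mm.h>
#include <linux/pfn.h>
#include <drm/drm_vma_manager.h>
#include <drm/ttm/ttm_bo.h>

static void foo_bo_dirty_rearm(struct foo_bo *bo)
{
	pgoff_t offset = drm_vma_node_start(&bo->tbo.base.vma_node);
	pgoff_t num_pages = PFN_UP(bo->tbo.resource->size);
	struct address_space *mapping = bo->tbo.bdev->dev_mapping;

	/* write-protect the BO's shared mappings so writes fault again */
	wp_shared_mapping_range(mapping, offset, num_pages);
}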
|
| H A D | vmwgfx_ttm_buffer.c |
    583   ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);  in vmw_bo_create_and_populate()
    586   container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_bo_create_and_populate()
    590   ttm_bo_unreserve(&vbo->tbo);  in vmw_bo_create_and_populate()
|
| H A D | vmwgfx_streamoutput.c |
    109   cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;  in vmw_dx_streamoutput_unscrub()
    200   if (WARN_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB))  in vmw_dx_streamoutput_unbind()
|
| H A D | vmwgfx_validation.c |
    179   if (entry->base.bo == &vbo->tbo) {  in vmw_validation_find_bo_dup()
    266   val_buf->bo = &vbo->tbo;  in vmw_validation_add_bo()
    509   if (vbo->tbo.pin_count > 0)  in vmw_validation_bo_validate_single()
|
| /linux/drivers/gpu/drm/loongson/ |
| H A D | lsdc_ttm.h |
    24    struct ttm_buffer_object tbo;  member
    52    static inline struct lsdc_bo *to_lsdc_bo(struct ttm_buffer_object *tbo)  in to_lsdc_bo() argument
    54    return container_of(tbo, struct lsdc_bo, tbo);  in to_lsdc_bo()
    59    return container_of(gem, struct lsdc_bo, tbo.base);  in gem_to_lsdc_bo()
|