| /linux/drivers/gpu/drm/ttm/ |
| ttm_tt.c |
     76  if (bo->ttm)  in ttm_tt_create()
    104  bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);  in ttm_tt_create()
    105  if (unlikely(bo->ttm == NULL))  in ttm_tt_create()
    108  WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&  in ttm_tt_create()
    109  !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));  in ttm_tt_create()
    118  static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)  in ttm_tt_alloc_page_directory() argument
    120  ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);  in ttm_tt_alloc_page_directory()
    121  if (!ttm->pages)  in ttm_tt_alloc_page_directory()
    127  static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)  in ttm_dma_tt_alloc_page_directory() argument
    129  ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +  in ttm_dma_tt_alloc_page_directory()
    [all …]
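The hits above show both halves of tt creation: the core calls the driver's ttm_tt_create() hook and sanity-checks the resulting page flags, while the page directory itself comes from kvcalloc(). A minimal sketch of such a driver hook, modeled on the qxl entry further down this listing (the name my_ttm_tt_create is illustrative):

    #include <drm/ttm/ttm_tt.h>
    #include <linux/slab.h>

    static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
    {
            struct ttm_tt *tt;

            tt = kzalloc(sizeof(*tt), GFP_KERNEL);
            if (!tt)
                    return NULL;

            /* ttm_tt_init() sets up the kvcalloc()'d page directory seen above */
            if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
                    kfree(tt);
                    return NULL;
            }
            return tt;
    }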
|
| ttm_agp_backend.c |
     46  struct ttm_tt ttm;  member
     51  int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)  in ttm_agp_bind() argument
     53  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_bind()
     56  int ret, cached = ttm->caching == ttm_cached;  in ttm_agp_bind()
     62  mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);  in ttm_agp_bind()
     67  for (i = 0; i < ttm->num_pages; i++) {  in ttm_agp_bind()
     68  struct page *page = ttm->pages[i];  in ttm_agp_bind()
     88  void ttm_agp_unbind(struct ttm_tt *ttm)  in ttm_agp_unbind() argument
     90  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_unbind()
    103  bool ttm_agp_is_bound(struct ttm_tt *ttm)  in ttm_agp_is_bound() argument
    [all …]
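ttm_agp_bind() recovers its backend state from the bare ttm_tt pointer via container_of(), which works because the tt is embedded in the wrapper rather than pointed to. A sketch of the idiom — the wrapper fields beyond the embedded tt are assumptions, not the file's exact layout:

    struct ttm_agp_backend {
            struct ttm_tt ttm;              /* embedded, per the "member" hit at line 46 */
            struct agp_memory *mem;         /* assumed driver-private state */
            struct agp_bridge_data *bridge; /* assumed driver-private state */
    };

    static inline struct ttm_agp_backend *to_agp_backend(struct ttm_tt *ttm)
    {
            return container_of(ttm, struct ttm_agp_backend, ttm);
    }

Because container_of() only does pointer arithmetic on the member offset, the conversion stays valid even if the embedded tt is later moved within the struct.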
|
| ttm_bo_util.c |
    153  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_move_memcpy() local
    168  if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||  in ttm_bo_move_memcpy()
    177  dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, ttm);  in ttm_bo_move_memcpy()
    183  src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, ttm);  in ttm_bo_move_memcpy()
    189  clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));  in ttm_bo_move_memcpy()
    190  if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))  in ttm_bo_move_memcpy()
    303  caching = bo->ttm->caching;  in ttm_io_prot()
    304  if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)  in ttm_io_prot()
    347  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_kmap_ttm() local
    353  BUG_ON(!ttm);  in ttm_bo_kmap_ttm()
    [all …]
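ttm_bo_kmap_ttm() is the system-memory leg of CPU mapping; callers normally reach it through ttm_bo_kmap()/ttm_bo_kunmap(). A hedged usage sketch (the helper name read_first_u32 is illustrative):

    #include <drm/ttm/ttm_bo.h>

    static int read_first_u32(struct ttm_buffer_object *bo, u32 *out)
    {
            struct ttm_bo_kmap_obj map;
            bool is_iomem;
            int ret;

            ret = ttm_bo_kmap(bo, 0, 1, &map);      /* map one page at offset 0 */
            if (ret)
                    return ret;

            *out = *(u32 *)ttm_kmap_obj_virtual(&map, &is_iomem);
            ttm_bo_kunmap(&map);
            return 0;
    }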
|
| /linux/drivers/gpu/drm/radeon/ |
| radeon_ttm.c |
     56  static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
     58  static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
    198  r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);  in radeon_bo_move()
    209  bo->ttm == NULL)) {  in radeon_bo_move()
    221  radeon_ttm_tt_unbind(bo->bdev, bo->ttm);  in radeon_bo_move()
    313  struct ttm_tt ttm;  member
    323  static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)  in radeon_ttm_tt_pin_userptr() argument
    326  struct radeon_ttm_tt *gtt = (void *)ttm;  in radeon_ttm_tt_pin_userptr()
    340  unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;  in radeon_ttm_tt_pin_userptr()
    348  unsigned num_pages = ttm->num_pages - pinned;  in radeon_ttm_tt_pin_userptr()
    [all …]
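radeon_ttm_tt_pin_userptr() pins the user address range in chunks until every page of the tt is covered. A condensed sketch of that loop — the GUP flavor and flags are assumptions, not radeon's exact calls:

    #include <linux/mm.h>

    static int pin_userptr_pages(unsigned long userptr, struct ttm_tt *ttm)
    {
            unsigned long pinned = 0;
            long r;

            do {
                    unsigned long num_pages = ttm->num_pages - pinned;
                    unsigned long addr = userptr + pinned * PAGE_SIZE;

                    /* GUP may pin fewer pages than asked; keep going */
                    r = pin_user_pages_fast(addr, num_pages, FOLL_WRITE,
                                            ttm->pages + pinned);
                    if (r < 0) {
                            unpin_user_pages(ttm->pages, pinned);
                            return r;
                    }
                    pinned += r;
            } while (pinned < ttm->num_pages);

            return 0;
    }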
|
| radeon_prime.c |
     41  return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,  in radeon_gem_prime_get_sg_table()
     42  bo->tbo.ttm->num_pages);  in radeon_gem_prime_get_sg_table()
     98  if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))  in radeon_gem_prime_export()
|
| /linux/drivers/gpu/drm/i915/gem/ |
| i915_gem_ttm.c |
     52  struct ttm_tt ttm;  member
    184  struct ttm_tt *ttm,  in i915_ttm_tt_shmem_populate() argument
    189  struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);  in i915_ttm_tt_shmem_populate()
    191  const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;  in i915_ttm_tt_shmem_populate()
    230  ttm->pages[i++] = page;  in i915_ttm_tt_shmem_populate()
    232  if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)  in i915_ttm_tt_shmem_populate()
    233  ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;  in i915_ttm_tt_shmem_populate()
    243  static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)  in i915_ttm_tt_shmem_unpopulate() argument
    245  struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);  in i915_ttm_tt_shmem_unpopulate()
    246  bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;  in i915_ttm_tt_shmem_unpopulate()
    [all …]
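On the shmem-backed path, populate re-reads the backing pages and then drops TTM_TT_FLAG_SWAPPED, since the contents are resident again. Distilled from lines 232–233 (helper name is illustrative):

    static void tt_mark_resident(struct ttm_tt *ttm)
    {
            /* contents were just re-read from shmem; the tt is no longer swapped */
            if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
                    ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
    }

Note the container_of(ttm, typeof(*i915_tt), ttm) spelling in the hits: typeof() avoids repeating the wrapper type name.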
|
| i915_gem_ttm_move.c |
     53  struct ttm_tt *ttm)  in i915_ttm_cache_level() argument
     57  ttm->caching == ttm_cached) ? I915_CACHE_LLC :  in i915_ttm_cache_level()
     86  if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {  in i915_ttm_adjust_domains_after_move()
    125  bo->ttm);  in i915_ttm_adjust_gem_after_move()
    198  struct ttm_tt *src_ttm = bo->ttm;  in i915_ttm_accel_move()
    330  ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :  in i915_ttm_memcpy_init()
    332  &obj->ttm.cached_io_rsgt->table,  in i915_ttm_memcpy_init()
    579  struct ttm_tt *ttm = bo->ttm;  in i915_ttm_move() local
    626  if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {  in i915_ttm_move()
    636  clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));  in i915_ttm_move()
    [all …]
|
| /linux/drivers/gpu/drm/xe/ |
| xe_bo.c |
    107  return resource_is_vram(bo->ttm.resource) ||  in xe_bo_is_vram()
    108  resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);  in xe_bo_is_vram()
    113  return bo->ttm.resource->mem_type == XE_PL_STOLEN;  in xe_bo_is_stolen()
    158  return !list_empty(&bo->ttm.base.gpuva.list);  in xe_bo_is_vm_bound()
    183  mgr = ttm_manager_type(&xe->ttm, res->mem_type);  in res_to_mem_region()
    186  return container_of(vram_mgr, struct xe_vram_region, ttm);  in res_to_mem_region()
    247  struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);  in add_vram()
    255  vram = container_of(vram_mgr, struct xe_vram_region, ttm);  in add_vram()
    331  struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);  in xe_evict_flags()
    376  struct ttm_tt ttm;  member
    [all …]
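xe embeds its ttm_device directly in struct xe_device, so any buffer object's bdev pointer leads back to the owning device — that is what the line-331 hit does. As a standalone helper (the name tbo_to_xe is illustrative):

    static inline struct xe_device *tbo_to_xe(struct ttm_buffer_object *tbo)
    {
            return container_of(tbo->bdev, struct xe_device, ttm);
    }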
|
| xe_drm_client.c |
    155  struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);  in xe_drm_client_remove_bo()
    158  xe_assert(xe, !kref_read(&bo->ttm.base.refcount));  in xe_drm_client_remove_bo()
    171  u32 mem_type = bo->ttm.resource->mem_type;  in bo_meminfo()
    175  if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))  in bo_meminfo()
    183  if (!dma_resv_test_signaled(bo->ttm.base.resv,  in bo_meminfo()
    195  struct ttm_device *bdev = &xef->xe->ttm;  in show_meminfo()
    211  if (dma_resv_trylock(bo->ttm.base.resv)) {  in show_meminfo()
    231  if (!kref_get_unless_zero(&bo->ttm.base.refcount))  in show_meminfo()
    234  if (dma_resv_trylock(bo->ttm.base.resv)) {  in show_meminfo()
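show_meminfo() walks BO lists without ever sleeping on a lock: it takes a reference only if the object is still alive, and trylocks the reservation so a stats dump cannot stall behind GPU work. A condensed sketch of the per-object step — xe_bo_put() as the release helper and the bo_meminfo() signature are assumptions:

    static void bo_meminfo_one(struct xe_bo *bo, struct drm_memory_stats *stats)
    {
            /* skip objects that have already begun destruction */
            if (!kref_get_unless_zero(&bo->ttm.base.refcount))
                    return;

            /* never block a stats dump on a contended reservation */
            if (dma_resv_trylock(bo->ttm.base.resv)) {
                    bo_meminfo(bo, stats);  /* accounting helper from the hits above */
                    dma_resv_unlock(bo->ttm.base.resv);
            }

            xe_bo_put(bo);
    }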
|
| xe_dma_buf.c |
    123  switch (bo->ttm.resource->mem_type) {  in xe_dma_buf_map()
    126  bo->ttm.ttm->pages,  in xe_dma_buf_map()
    139  bo->ttm.resource, 0,  in xe_dma_buf_map()
    140  bo->ttm.base.size, attach->dev,  in xe_dma_buf_map()
    186  ret = drm_exec_lock_obj(&exec, &bo->ttm.base);  in xe_dma_buf_begin_cpu_access()
    230  ret = ttm_bo_setup_export(&bo->ttm, &ctx);  in xe_gem_prime_export()
    276  return ret ? ERR_PTR(ret) : &bo->ttm.base;  in xe_dma_buf_init_obj()
    345  attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);  in xe_gem_prime_import()
|
| xe_ttm_sys_mgr.c |
     95  err = ttm_resource_manager_evict_all(&xe->ttm, man);  in xe_ttm_sys_mgr_fini()
    100  ttm_set_driver_manager(&xe->ttm, XE_PL_TT, NULL);  in xe_ttm_sys_mgr_fini()
    115  ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);  in xe_ttm_sys_mgr_init()
    116  ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);  in xe_ttm_sys_mgr_init()
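These four hits are the whole init/fini contract for a driver-side resource manager: initialize and register on setup, evict everything and deregister on teardown. Reassembled as a sketch — the set_used() calls follow the usual TTM pattern and are assumptions here:

    static void sys_mgr_setup(struct xe_device *xe,
                              struct ttm_resource_manager *man, u64 gtt_size)
    {
            ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);
            ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);
            ttm_resource_manager_set_used(man, true);
    }

    static int sys_mgr_teardown(struct xe_device *xe,
                                struct ttm_resource_manager *man)
    {
            int err;

            ttm_resource_manager_set_used(man, false);
            err = ttm_resource_manager_evict_all(&xe->ttm, man);
            if (err)
                    return err;     /* resources still live; keep the manager */
            ttm_set_driver_manager(&xe->ttm, XE_PL_TT, NULL);
            return 0;
    }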
|
| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_sgdma.c |
     15  struct ttm_tt ttm;  member
     20  nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_destroy() argument
     22  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_destroy()
     24  if (ttm) {  in nouveau_sgdma_destroy()
     25  ttm_tt_fini(&nvbe->ttm);  in nouveau_sgdma_destroy()
     31  nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)  in nouveau_sgdma_bind() argument
     33  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_bind()
     41  ret = nouveau_mem_host(reg, &nvbe->ttm);  in nouveau_sgdma_bind()
     58  nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_unbind() argument
     60  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_unbind()
    [all …]
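nouveau casts the ttm_tt pointer to its wrapper directly, which is only valid while the embedded tt remains the first member; container_of() is the position-independent spelling of the same conversion. Note also the teardown order in nouveau_sgdma_destroy(): ttm_tt_fini() first, then free the wrapper.

    static inline struct nouveau_sgdma_be *to_sgdma_be(struct ttm_tt *ttm)
    {
            return container_of(ttm, struct nouveau_sgdma_be, ttm);
    }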
|
| nouveau_ttm.c |
    170  drm->ttm.type_host[!!kind] = typei;  in nouveau_ttm_init_host()
    176  drm->ttm.type_ncoh[!!kind] = typei;  in nouveau_ttm_init_host()
    191  ttm_resource_manager_init(man, &drm->ttm.bdev,  in nouveau_ttm_init_vram()
    193  ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);  in nouveau_ttm_init_vram()
    197  return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,  in nouveau_ttm_init_vram()
    205  struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
    209  ttm_resource_manager_evict_all(&drm->ttm.bdev, man);  in nouveau_ttm_fini_vram()
    211  ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);  in nouveau_ttm_fini_vram()
    214  ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
    229  return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,  in nouveau_ttm_init_gtt()
    [all …]
|
| nouveau_bo.c |
     47  static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
     49  static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
    232  nvbo->bo.bdev = &drm->ttm.bdev;  in nouveau_bo_alloc()
    695  struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;  in nouveau_bo_sync_for_device()
    731  struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;  in nouveau_bo_sync_for_cpu()
    768  mutex_lock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_add_io_reserve_lru()
    769  list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);  in nouveau_bo_add_io_reserve_lru()
    770  mutex_unlock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_add_io_reserve_lru()
    778  mutex_lock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_del_io_reserve_lru()
    780  mutex_unlock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_del_io_reserve_lru()
    [all …]
|
| H A D | nouveau_ttm.h | 8 return container_of(bd, struct nouveau_drm, ttm.bdev); in nouveau_bdev() 24 int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); 25 void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm); 26 void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
|
| /linux/drivers/gpu/drm/vmwgfx/ |
| vmwgfx_ttm_buffer.c |
    267  container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_bo_sg_table()
    274  struct ttm_tt *ttm, struct ttm_resource *bo_mem)  in vmw_ttm_bind() argument
    277  container_of(ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_ttm_bind()
    296  ttm->num_pages, vmw_be->gmr_id);  in vmw_ttm_bind()
    301  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
    307  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
    321  struct ttm_tt *ttm)  in vmw_ttm_unbind() argument
    324  container_of(ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_ttm_unbind()
    348  static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in vmw_ttm_destroy() argument
    351  container_of(ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_ttm_destroy()
    [all …]
|
| vmwgfx_blit.c |
    559  bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;  in vmw_bo_cpu_blit()
    560  bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;  in vmw_bo_cpu_blit()
    571  if (!ttm_tt_is_populated(dst->ttm)) {  in vmw_bo_cpu_blit()
    572  ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);  in vmw_bo_cpu_blit()
    577  if (!ttm_tt_is_populated(src->ttm)) {  in vmw_bo_cpu_blit()
    578  ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);  in vmw_bo_cpu_blit()
    588  if (!src->ttm->pages && src->ttm->sg) {  in vmw_bo_cpu_blit()
    589  src_pages = kvmalloc_objs(struct page *, src->ttm->num_pages);  in vmw_bo_cpu_blit()
    592  ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,  in vmw_bo_cpu_blit()
    593  src->ttm->num_pages);  in vmw_bo_cpu_blit()
    [all …]
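Before vmw_bo_cpu_blit() touches either buffer with the CPU, it forces both tts to be populated via the driver hook. The guard reduces to this sketch (helper name is illustrative):

    static int ensure_tt_populated(struct ttm_buffer_object *bo,
                                   struct ttm_operation_ctx *ctx)
    {
            if (!ttm_tt_is_populated(bo->ttm))
                    return bo->bdev->funcs->ttm_tt_populate(bo->bdev, bo->ttm, ctx);
            return 0;
    }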
|
| /linux/include/drm/ttm/ |
| ttm_tt.h |
    207  int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
    220  void ttm_tt_fini(struct ttm_tt *ttm);
    230  void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
    239  int ttm_tt_swapin(struct ttm_tt *ttm);
    240  int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
    252  int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
    263  void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
    273  static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)  in ttm_tt_mark_for_clear() argument
    275  ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;  in ttm_tt_mark_for_clear()
    321  int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
    [all …]
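Read together, these declarations sketch the tt lifecycle: ttm_tt_init() pairs with ttm_tt_fini(), ttm_tt_populate() with ttm_tt_unpopulate(), and swapout/swapin sit in between under memory pressure. A hedged walk-through of the happy path — the stack-allocated tt and zero page_flags are illustrative, not how drivers embed it:

    static int tt_lifecycle_demo(struct ttm_device *bdev,
                                 struct ttm_buffer_object *bo,
                                 struct ttm_operation_ctx *ctx)
    {
            struct ttm_tt tt;
            int ret;

            ret = ttm_tt_init(&tt, bo, 0, ttm_cached, 0);
            if (ret)
                    return ret;

            /* request zeroed backing pages on the next populate/move */
            ttm_tt_mark_for_clear(&tt);

            ret = ttm_tt_populate(bdev, &tt, ctx);
            if (!ret)
                    ttm_tt_unpopulate(bdev, &tt);

            ttm_tt_fini(&tt);       /* always last, after pages are gone */
            return ret;
    }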
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_ttm.c |
     69  struct ttm_tt *ttm,
     72  struct ttm_tt *ttm);
    255  flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);  in amdgpu_ttm_map_buffer()
    264  dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];  in amdgpu_ttm_map_buffer()
    510  r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);  in amdgpu_bo_move()
    519  bo->ttm == NULL)) {  in amdgpu_bo_move()
    538  amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);  in amdgpu_bo_move()
    696  struct ttm_tt ttm;  member
    706  #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
    720  struct ttm_tt *ttm = bo->tbo.ttm;  in amdgpu_ttm_tt_get_user_pages() local
    [all …]
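amdgpu_ttm_map_buffer() indexes the tt's dma_address array by page to turn a byte offset into a bus address (the line-264 hit). As a standalone helper (name is illustrative):

    static dma_addr_t tt_dma_addr_at(struct ttm_tt *ttm, u64 offset)
    {
            /* per-page DMA address plus the intra-page remainder */
            return ttm->dma_address[offset >> PAGE_SHIFT] + (offset & ~PAGE_MASK);
    }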
|
| amdgpu_amdkfd_gpuvm.c |
    423  if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),  in amdgpu_amdkfd_bo_validate()
    569  struct ttm_tt *src_ttm = mem->bo->tbo.ttm;  in kfd_mem_dmamap_userptr()
    570  struct ttm_tt *ttm = bo->tbo.ttm;  in kfd_mem_dmamap_userptr() local
    573  if (WARN_ON(ttm->num_pages != src_ttm->num_pages))  in kfd_mem_dmamap_userptr()
    576  ttm->sg = kmalloc_obj(*ttm->sg);  in kfd_mem_dmamap_userptr()
    577  if (unlikely(!ttm->sg))  in kfd_mem_dmamap_userptr()
    581  ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,  in kfd_mem_dmamap_userptr()
    582  ttm->num_pages, 0,  in kfd_mem_dmamap_userptr()
    583  (u64)ttm->num_pages << PAGE_SHIFT,  in kfd_mem_dmamap_userptr()
    588  ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);  in kfd_mem_dmamap_userptr()
    [all …]
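kfd_mem_dmamap_userptr() wraps already-pinned user pages into an sg_table and DMA-maps it. Condensed into a sketch, with plain kmalloc() standing in for the kmalloc_obj() helper used above, and the GFP/direction choices as assumptions:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int dmamap_user_pages(struct device *dev, struct ttm_tt *ttm,
                                 struct page **pages)
    {
            int ret;

            ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
            if (!ttm->sg)
                    return -ENOMEM;

            ret = sg_alloc_table_from_pages(ttm->sg, pages, ttm->num_pages, 0,
                                            (u64)ttm->num_pages << PAGE_SHIFT,
                                            GFP_KERNEL);
            if (ret)
                    goto free_sg;

            ret = dma_map_sgtable(dev, ttm->sg, DMA_BIDIRECTIONAL, 0);
            if (ret)
                    goto free_table;
            return 0;

    free_table:
            sg_free_table(ttm->sg);
    free_sg:
            kfree(ttm->sg);
            ttm->sg = NULL;
            return ret;
    }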
|
| /linux/drivers/gpu/drm/xe/tests/ |
| xe_bo.c |
     30  struct ttm_tt *ttm;  in ccs_test_migrate() local
     47  fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,  in ccs_test_migrate()
     71  timeout = dma_resv_wait_timeout(bo->ttm.base.resv,  in ccs_test_migrate()
     85  ttm = bo->ttm.ttm;  in ccs_test_migrate()
     86  if (!ttm || !ttm_tt_is_populated(ttm)) {  in ccs_test_migrate()
     92  if (ccs_page >= ttm->num_pages) {  in ccs_test_migrate()
     97  page = ttm->pages[ccs_page];  in ccs_test_migrate()
    383  int ret = ttm_bo_vmap(&bo->ttm, &map);  in shrink_test_fill_random()
    389  for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {  in shrink_test_fill_random()
    397  ttm_bo_vunmap(&bo->ttm, &map);  in shrink_test_fill_random()
    [all …]
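shrink_test_fill_random() uses ttm_bo_vmap() to get a whole-object CPU view through an iosys_map. The same pattern, writing a constant instead of random data (helper name is illustrative):

    #include <linux/iosys-map.h>

    static int fill_bo_u32(struct ttm_buffer_object *bo, u32 value)
    {
            struct iosys_map map;
            size_t i;
            int ret;

            ret = ttm_bo_vmap(bo, &map);
            if (ret)
                    return ret;

            for (i = 0; i < bo->base.size; i += sizeof(u32))
                    iosys_map_wr(&map, i, u32, value);

            ttm_bo_vunmap(bo, &map);
            return 0;
    }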
|
| /linux/drivers/gpu/drm/qxl/ |
| qxl_ttm.c |
    101  static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in qxl_ttm_backend_destroy() argument
    103  ttm_tt_fini(ttm);  in qxl_ttm_backend_destroy()
    104  kfree(ttm);  in qxl_ttm_backend_destroy()
    110  struct ttm_tt *ttm;  in qxl_ttm_tt_create() local
    112  ttm = kzalloc_obj(struct ttm_tt);  in qxl_ttm_tt_create()
    113  if (ttm == NULL)  in qxl_ttm_tt_create()
    115  if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) {  in qxl_ttm_tt_create()
    116  kfree(ttm);  in qxl_ttm_tt_create()
    119  return ttm;  in qxl_ttm_tt_create()
    162  if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {  in qxl_bo_move()
|
| /linux/drivers/gpu/drm/ttm/tests/ |
| ttm_kunit_helpers.c |
     58  static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)  in ttm_tt_simple_destroy() argument
     60  kfree(ttm);  in ttm_tt_simple_destroy()
     70  if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {  in mock_move()
    119  struct ttm_device *ttm,  in ttm_device_kunit_init_with_funcs() argument
    126  err = ttm_device_init(ttm, funcs, drm->dev,  in ttm_device_kunit_init_with_funcs()
    144  struct ttm_device *ttm,  in ttm_device_kunit_init() argument
    147  return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,  in ttm_device_kunit_init()
    162  struct ttm_device *ttm)  in ttm_device_kunit_init_bad_evict() argument
    164  return ttm_device_kunit_init_with_funcs(priv, ttm, 0,  in ttm_device_kunit_init_bad_evict()
|
| /linux/arch/powerpc/perf/ |
| ppc970-pmu.c |
    264  unsigned int ttm, grp;  in p970_compute_mmcr() local
    321  ttm = unitmap[i];  in p970_compute_mmcr()
    322  ++ttmuse[(ttm >> 2) & 1];  in p970_compute_mmcr()
    323  mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;  in p970_compute_mmcr()
    335  ttm = (unitmap[unit] >> 2) & 1;  in p970_compute_mmcr()
    337  ttm = 2;  in p970_compute_mmcr()
    339  ttm = 3;  in p970_compute_mmcr()
    343  mmcr1 |= (unsigned long)ttm  in p970_compute_mmcr()
|
| /linux/drivers/gpu/drm/loongson/ |
| lsdc_ttm.c |
    113  struct ttm_tt *ttm,  in lsdc_ttm_tt_populate() argument
    116  bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);  in lsdc_ttm_tt_populate()
    118  if (slave && ttm->sg) {  in lsdc_ttm_tt_populate()
    119  drm_prime_sg_to_dma_addr_array(ttm->sg,  in lsdc_ttm_tt_populate()
    120  ttm->dma_address,  in lsdc_ttm_tt_populate()
    121  ttm->num_pages);  in lsdc_ttm_tt_populate()
    126  return ttm_pool_alloc(&bdev->pool, ttm, ctx);  in lsdc_ttm_tt_populate()
    130  struct ttm_tt *ttm)  in lsdc_ttm_tt_unpopulate() argument
    132  bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);  in lsdc_ttm_tt_unpopulate()
    137  return ttm_pool_free(&bdev->pool, ttm);  in lsdc_ttm_tt_unpopulate()
    [all …]
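lsdc's populate hook splits on TTM_TT_FLAG_EXTERNAL: prime-imported ("slave") tts already carry an sg table from the exporter, so it is scattered into dma_address instead of allocating pages from the pool. The branch reduces to this sketch (helper name is illustrative):

    static int populate_or_import(struct ttm_device *bdev, struct ttm_tt *ttm,
                                  struct ttm_operation_ctx *ctx)
    {
            bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

            if (slave && ttm->sg) {
                    /* imported buffer: reuse the exporter's DMA addresses */
                    drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
                                                   ttm->num_pages);
                    return 0;
            }

            return ttm_pool_alloc(&bdev->pool, ttm, ctx);
    }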
|