Lines matching refs:tbo
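These are grep-style cross-reference hits for the tbo member (the struct ttm_buffer_object embedded in struct amdgpu_bo), apparently from drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c; the leading number on each hit is its line number in that file. Hedged sketches of the recurring patterns (BO validation, fence bookkeeping, MMIO DMA mapping, GEM reference counting, fence iteration, and drm_exec locking) follow the listing.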
295 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
330 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
332 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
353 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
367 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
384 struct dma_resv *resv = &bo->tbo.base._resv;
407 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
412 if (bo->tbo.pin_count)
417 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
440 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
444 dma_resv_add_fence(bo->tbo.base.resv, fence,
469 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
487 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
552 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
553 struct ttm_tt *ttm = bo->tbo.ttm;
576 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
600 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
636 struct ttm_tt *ttm = bo->tbo.ttm;
651 dma_addr = mem->bo->tbo.sg->sgl->dma_address;
652 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
655 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
661 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
668 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
679 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
713 struct ttm_tt *ttm = bo->tbo.ttm;
719 (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
759 struct ttm_tt *ttm = bo->tbo.ttm;
768 (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
777 bo->tbo.sg = NULL;
807 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
857 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
858 unsigned long bo_size = mem->bo->tbo.base.size;
900 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
909 drm_gem_object_get(&bo[i]->tbo.base);
914 drm_gem_object_get(&bo[i]->tbo.base);
915 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
922 } else if (mem->bo->tbo.type == ttm_bo_type_sg) {
985 drm_gem_object_put(&bo[i]->tbo.base);
999 drm_gem_object_put(&bo->tbo.base);
1048 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
1075 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
1090 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1096 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
1149 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1199 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1339 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1415 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1418 dma_resv_add_fence(vm->root.bo->tbo.base.resv,
1476 if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
1480 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1482 pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
1579 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1779 bo->tbo.sg = sg;
1780 bo->tbo.ttm->sg = sg;
1856 unsigned long bo_size = mem->bo->tbo.base.size;
1892 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
1895 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
1922 if (mem->bo->tbo.sg) {
1923 sg_free_table(mem->bo->tbo.sg);
1924 kfree(mem->bo->tbo.sg);
1942 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1954 drm_gem_object_put(&mem->bo->tbo.base);
1995 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2004 bo_size = bo->tbo.base.size;
2026 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
2027 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
2089 if (entry->bo_va->base.bo->tbo.ttm &&
2090 !entry->bo_va->base.bo->tbo.ttm->sg)
2107 unsigned long bo_size = mem->bo->tbo.base.size;
2198 ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2241 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2522 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
2533 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
2537 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2547 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2620 gobj = &mem->bo->tbo.base;
2641 if (bo->tbo.ttm->pages[0]) {
2643 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2704 mem->bo->tbo.ttm, mem->range);
2880 gobj = &mem->bo->tbo.base;
2914 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
2958 peer_vm->root.bo->tbo.bdev);
3018 if (mem->bo->tbo.pin_count)
3021 dma_resv_add_fence(mem->bo->tbo.base.resv,
3030 dma_resv_add_fence(bo->tbo.base.resv,
3082 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
3085 dma_resv_add_fence(gws_bo->tbo.base.resv,
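
The most frequent pattern in the hits is TTM validation (source lines 417, 576, 600, 668, 1090, 1480, 2537, 2643, among others): build a placement for the desired memory domain, then ask TTM to (re)validate the buffer there. A minimal sketch, assuming a recent kernel tree and an amdgpu module context; the helper name is hypothetical and the caller is assumed to hold the BO's reservation lock.

#include <drm/ttm/ttm_bo.h>

#include "amdgpu.h"
#include "amdgpu_object.h"

/* Hypothetical helper: migrate/validate a BO into the GTT domain. */
static int example_validate_to_gtt(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true,
					 .no_wait_gpu = false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

This mirrors the path at source lines 1476-1482, where a BO whose current placement is not contiguous is validated into GTT.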
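Fences are attached with a reserve-then-add pair: dma_resv_reserve_fences() must succeed before dma_resv_add_fence() may be called (source lines 440/444, 1415/1418, 3082/3085). The hit at line 367 shows the inverse operation, replacing every fence from one context with a signaled stub. A sketch assuming the reservation lock is held; the helper names and the usage flag are illustrative.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical helper: attach one fence to a reservation object. */
static int example_attach_fence(struct dma_resv *resv,
				struct dma_fence *fence)
{
	int ret = dma_resv_reserve_fences(resv, 1);

	if (ret)
		return ret;
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
	return 0;
}

/* Hypothetical helper: detach a context's fences, as line 367 does. */
static void example_remove_context_fences(struct dma_resv *resv,
					  u64 context)
{
	struct dma_fence *stub = dma_fence_get_stub();

	dma_resv_replace_fences(resv, context, stub,
				DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(stub);
}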
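The hits at source lines 651-679 map a doorbell/MMIO page for device access with dma_map_resource() and undo it with dma_unmap_resource(). A minimal sketch; the single-entry sg table the driver builds around the returned address is reduced to a comment, and the helper name is hypothetical.

#include <linux/dma-mapping.h>

/* Hypothetical helper: round-trip a physical MMIO range through DMA. */
static int example_map_mmio(struct device *dev, phys_addr_t phys,
			    size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_resource(dev, phys, size, dir,
					   DMA_ATTR_SKIP_CPU_SYNC);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... the driver wraps addr in a one-entry sg table here ... */

	dma_unmap_resource(dev, addr, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}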
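Since struct amdgpu_bo embeds a ttm_buffer_object whose base is the GEM object, reference counts are taken and dropped through bo->tbo.base (source lines 909, 914, 985, 999, 1954, 2620, 2880). A sketch of the get/put pairing; the helper is hypothetical.

#include <drm/drm_gem.h>

#include "amdgpu_object.h"

/* Hypothetical helper: hold a GEM reference across a use of the BO. */
static void example_use_bo(struct amdgpu_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	/* ... bo stays alive here even if every other ref is dropped ... */
	drm_gem_object_put(&bo->tbo.base);
}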
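The hit at source line 2914 walks the fences of a reservation object with dma_resv_for_each_fence(), which requires the reservation lock to be held (dma_resv_for_each_fence_unlocked() is the lock-free variant). A sketch; the usage filter is illustrative.

#include <linux/dma-resv.h>

/* Hypothetical helper: visit every fence attached to a resv object. */
static void example_walk_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP,
				fence) {
		/* inspect fence; the iterator manages its reference */
	}
}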
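The hits at source lines 1149 and 1199 lock objects through the drm_exec helper, which retries the whole locking sequence on eviction contention. A sketch assuming a kernel where drm_exec_init() takes a third object-count hint argument; the helper name is hypothetical and error handling is minimal.

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/* Hypothetical helper: lock one GEM object and reserve a fence slot. */
static int example_lock_obj(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_prepare_obj(&exec, obj, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	/* ... work with the locked object while exec is live ... */
	drm_exec_fini(&exec);
	return ret;
}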