Lines Matching +full:multi +full:- +full:tt
1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-buf.h>
79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
84 return mem_type_is_vram(res->mem_type); in resource_is_vram()
89 return resource_is_vram(bo->ttm.resource) || in xe_bo_is_vram()
90 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); in xe_bo_is_vram()
95 return bo->ttm.resource->mem_type == XE_PL_STOLEN; in xe_bo_is_stolen()
99  * xe_bo_has_single_placement - check if BO has only one allowed memory placement
109 return bo->placement.num_placement == 1; in xe_bo_has_single_placement()
113 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
129 return bo->flags & XE_BO_FLAG_USER; in xe_bo_is_user()
138 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
139 return tile->migrate; in mem_type_to_migrate()
144 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region()
148 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
149 return to_xe_ttm_vram_mgr(mgr)->vram; in res_to_mem_region()
156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
158 bo->placements[*c] = (struct ttm_place) { in try_add_system()
172 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
174 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; in add_vram()
175 xe_assert(xe, vram && vram->usable_size); in add_vram()
176 io_size = vram->io_size; in add_vram()
186 if (io_size < vram->usable_size) { in add_vram()
202 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
204 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
211 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
213 bo->placements[*c] = (struct ttm_place) { in try_add_stolen()
233 return -EINVAL; in __xe_bo_placement_for_flags()
235 bo->placement = (struct ttm_placement) { in __xe_bo_placement_for_flags()
237 .placement = bo->placements, in __xe_bo_placement_for_flags()
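The try_add_system(), add_vram() and try_add_stolen() helpers above each append one struct ttm_place to bo->placements[] and bump the counter *c; __xe_bo_placement_for_flags() then wraps the array in a struct ttm_placement. A minimal illustrative sketch of that TTM convention (not the driver's code; the two placements are arbitrary examples):

	struct ttm_place places[2] = {
		{ .mem_type = XE_PL_VRAM0, .flags = TTM_PL_FLAG_CONTIGUOUS },
		{ .mem_type = XE_PL_TT },
	};
	struct ttm_placement placement = {
		.num_placement = ARRAY_SIZE(places),	/* entries actually filled in */
		.placement = places,
	};
	/* &placement is what eventually reaches ttm_bo_validate() */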
255 if (tbo->type == ttm_bo_type_sg) { in xe_evict_flags()
256 placement->num_placement = 0; in xe_evict_flags()
268 switch (tbo->resource->mem_type) { in xe_evict_flags()
288 static int xe_tt_map_sg(struct ttm_tt *tt) in xe_tt_map_sg() argument
290 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_map_sg()
291 unsigned long num_pages = tt->num_pages; in xe_tt_map_sg()
294 XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); in xe_tt_map_sg()
296 if (xe_tt->sg) in xe_tt_map_sg()
299 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, in xe_tt_map_sg()
302 xe_sg_segment_size(xe_tt->dev), in xe_tt_map_sg()
307 xe_tt->sg = &xe_tt->sgt; in xe_tt_map_sg()
308 ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
311 sg_free_table(xe_tt->sg); in xe_tt_map_sg()
312 xe_tt->sg = NULL; in xe_tt_map_sg()
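xe_tt_map_sg() follows the common scatter-gather mapping pattern: build an sg_table from the ttm_tt's page array, then DMA-map it, freeing the table again if the mapping fails so a later call can retry. A stripped-down, illustrative sketch (ex_map_pages() is a made-up name; the real code additionally caps the segment size via xe_sg_segment_size(xe_tt->dev)):

	/* needs <linux/scatterlist.h> and <linux/dma-mapping.h> */
	static int ex_map_pages(struct device *dev, struct page **pages,
				unsigned long num_pages, struct sg_table *sgt)
	{
		int ret;

		ret = sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
							num_pages << PAGE_SHIFT,
							UINT_MAX, GFP_KERNEL);
		if (ret)
			return ret;

		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret)
			sg_free_table(sgt);	/* leave a clean slate for a retry */

		return ret;
	}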
319 static void xe_tt_unmap_sg(struct ttm_tt *tt) in xe_tt_unmap_sg() argument
321 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_tt_unmap_sg()
323 if (xe_tt->sg) { in xe_tt_unmap_sg()
324 dma_unmap_sgtable(xe_tt->dev, xe_tt->sg, in xe_tt_unmap_sg()
326 sg_free_table(xe_tt->sg); in xe_tt_unmap_sg()
327 xe_tt->sg = NULL; in xe_tt_unmap_sg()
333 struct ttm_tt *tt = bo->ttm.ttm; in xe_bo_sg() local
334 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); in xe_bo_sg()
336 return xe_tt->sg; in xe_bo_sg()
344 struct xe_ttm_tt *tt; in xe_ttm_tt_create() local
349 tt = kzalloc(sizeof(*tt), GFP_KERNEL); in xe_ttm_tt_create()
350 if (!tt) in xe_ttm_tt_create()
353 tt->dev = xe->drm.dev; in xe_ttm_tt_create()
357 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
367 switch (bo->cpu_caching) { in xe_ttm_tt_create()
376 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); in xe_ttm_tt_create()
379 * Display scanout is always non-coherent with the CPU cache. in xe_ttm_tt_create()
382 * non-coherent and require a CPU:WC mapping. in xe_ttm_tt_create()
384 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || in xe_ttm_tt_create()
385 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
386 bo->flags & XE_BO_FLAG_PAGETABLE)) in xe_ttm_tt_create()
390 if (bo->flags & XE_BO_FLAG_NEEDS_UC) { in xe_ttm_tt_create()
392  * Valid for internally-created buffers only, for in xe_ttm_tt_create()
395 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
399 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); in xe_ttm_tt_create()
401 kfree(tt); in xe_ttm_tt_create()
405 return &tt->ttm; in xe_ttm_tt_create()
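Reading the caching selection above: user BOs derive ttm_cached vs. ttm_write_combined from bo->cpu_caching (which user BOs are expected to set, hence the WARN_ON); scanout BOs without an explicit cpu_caching, and page-table BOs on graphics version 12.70+, are forced to write-combined because they are non-coherent with the CPU cache; and XE_BO_FLAG_NEEDS_UC selects uncached, which the assert restricts to internally created BOs with cpu_caching left at zero.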
408 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, in xe_ttm_tt_populate() argument
414 * dma-bufs are not populated with pages, and the dma- in xe_ttm_tt_populate()
417 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) in xe_ttm_tt_populate()
420 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); in xe_ttm_tt_populate()
427 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_unpopulate() argument
429 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) in xe_ttm_tt_unpopulate()
432 xe_tt_unmap_sg(tt); in xe_ttm_tt_unpopulate()
434 return ttm_pool_free(&ttm_dev->pool, tt); in xe_ttm_tt_unpopulate()
437 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) in xe_ttm_tt_destroy() argument
439 ttm_tt_fini(tt); in xe_ttm_tt_destroy()
440 kfree(tt); in xe_ttm_tt_destroy()
448 switch (mem->mem_type) { in xe_ttm_io_mem_reserve()
458 if (vres->used_visible_size < mem->size) in xe_ttm_io_mem_reserve()
459 return -EINVAL; in xe_ttm_io_mem_reserve()
461 mem->bus.offset = mem->start << PAGE_SHIFT; in xe_ttm_io_mem_reserve()
463 if (vram->mapping && in xe_ttm_io_mem_reserve()
464 mem->placement & TTM_PL_FLAG_CONTIGUOUS) in xe_ttm_io_mem_reserve()
465 mem->bus.addr = (u8 __force *)vram->mapping + in xe_ttm_io_mem_reserve()
466 mem->bus.offset; in xe_ttm_io_mem_reserve()
468 mem->bus.offset += vram->io_start; in xe_ttm_io_mem_reserve()
469 mem->bus.is_iomem = true; in xe_ttm_io_mem_reserve()
472 mem->bus.caching = ttm_write_combined; in xe_ttm_io_mem_reserve()
478 return -EINVAL; in xe_ttm_io_mem_reserve()
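In the VRAM branch above, mem->start is in pages, so bus.offset first becomes the byte offset into the VRAM region; when the whole region is already ioremapped (vram->mapping) and the resource is contiguous, bus.addr is pre-seeded with the corresponding kernel virtual address so TTM can skip its own ioremap; io_start is then added so bus.offset ends up as the PCI BAR (physical) address, with write-combined caching.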
487 struct drm_gem_object *obj = &bo->ttm.base; in xe_bo_trigger_rebind()
492 dma_resv_assert_held(bo->ttm.base.resv); in xe_bo_trigger_rebind()
494 if (!list_empty(&bo->ttm.base.gpuva.list)) { in xe_bo_trigger_rebind()
495 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, in xe_bo_trigger_rebind()
503 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_trigger_rebind()
514 if (ctx->no_wait_gpu && in xe_bo_trigger_rebind()
515 !dma_resv_test_signaled(bo->ttm.base.resv, in xe_bo_trigger_rebind()
517 return -EBUSY; in xe_bo_trigger_rebind()
519 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in xe_bo_trigger_rebind()
521 ctx->interruptible, in xe_bo_trigger_rebind()
524 return -ETIME; in xe_bo_trigger_rebind()
545 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
556 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; in xe_bo_move_dmabuf()
557 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, in xe_bo_move_dmabuf()
559 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf()
563 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
565 if (new_res->mem_type == XE_PL_SYSTEM) in xe_bo_move_dmabuf()
568 if (ttm_bo->sg) { in xe_bo_move_dmabuf()
569 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); in xe_bo_move_dmabuf()
570 ttm_bo->sg = NULL; in xe_bo_move_dmabuf()
577 ttm_bo->sg = sg; in xe_bo_move_dmabuf()
578 xe_tt->sg = sg; in xe_bo_move_dmabuf()
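xe_bo_move_dmabuf() uses the standard dma-buf importer flow: unmap any previous sg_table, then map the attachment again for the new placement. A minimal illustrative sketch of that flow (error handling trimmed, no reservation locking shown):

	struct sg_table *sg;

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	/* program the device using the returned sg_table */

	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);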
587 * xe_bo_move_notify - Notify subsystems of a pending move
601 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
607 struct ttm_buffer_object *ttm_bo = &bo->ttm; in xe_bo_move_notify()
608 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify()
609 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move_notify()
610 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move_notify()
619 return -EINVAL; in xe_bo_move_notify()
626 /* Don't call move_notify() for imported dma-bufs. */ in xe_bo_move_notify()
627 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) in xe_bo_move_notify()
628 dma_buf_move_notify(ttm_bo->base.dma_buf); in xe_bo_move_notify()
636 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
637 if (!list_empty(&bo->vram_userfault_link)) in xe_bo_move_notify()
638 list_del_init(&bo->vram_userfault_link); in xe_bo_move_notify()
639 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
650 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move()
652 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move()
653 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move()
654 struct ttm_tt *ttm = ttm_bo->ttm; in xe_bo_move()
664 /* Bo creation path, moving to system or TT. */ in xe_bo_move()
666 if (new_mem->mem_type == XE_PL_TT) in xe_bo_move()
673 if (ttm_bo->type == ttm_bo_type_sg) { in xe_bo_move()
681 (ttm->page_flags & TTM_TT_FLAG_SWAPPED)); in xe_bo_move()
683 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : in xe_bo_move()
686 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || in xe_bo_move()
687 (!ttm && ttm_bo->type == ttm_bo_type_device); in xe_bo_move()
689 if (new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
700 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { in xe_bo_move()
706 * Failed multi-hop where the old_mem is still marked as in xe_bo_move()
710 new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
722 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
723 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
741 new_mem->mem_type == XE_PL_SYSTEM))) { in xe_bo_move()
742 hop->fpfn = 0; in xe_bo_move()
743 hop->lpfn = 0; in xe_bo_move()
744 hop->mem_type = XE_PL_TT; in xe_bo_move()
745 hop->flags = TTM_PL_FLAG_TEMPORARY; in xe_bo_move()
746 ret = -EMULTIHOP; in xe_bo_move()
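This is the standard TTM multi-hop handshake: when a direct SYSTEM <-> VRAM move cannot be done in one step, the move callback describes an intermediate placement in *hop (here XE_PL_TT, marked TTM_PL_FLAG_TEMPORARY) and returns -EMULTIHOP; the TTM core then bounces the BO through that temporary placement and calls the move callback again for the final hop.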
750 if (bo->tile) in xe_bo_move()
751 migrate = bo->tile->migrate; in xe_bo_move()
753 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
757 migrate = xe->tiles[0].migrate; in xe_bo_move()
760 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); in xe_bo_move()
768 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
789 void __iomem *new_addr = vram->mapping + in xe_bo_move()
790 (new_mem->start << PAGE_SHIFT); in xe_bo_move()
792 if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) { in xe_bo_move()
793 ret = -EINVAL; in xe_bo_move()
798 xe_assert(xe, new_mem->start == in xe_bo_move()
799 bo->placements->fpfn); in xe_bo_move()
801 iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); in xe_bo_move()
808 if (mem_type_is_vram(new_mem->mem_type)) in xe_bo_move()
834 * bo->resource == NULL, so just attach the in xe_bo_move()
837 dma_resv_add_fence(ttm_bo->base.resv, fence, in xe_bo_move()
848 if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) && in xe_bo_move()
849 ttm_bo->ttm) in xe_bo_move()
850 xe_tt_unmap_sg(ttm_bo->ttm); in xe_bo_move()
856 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
862 * suspend-resume.
883 if (WARN_ON(!bo->ttm.resource)) in xe_bo_evict_pinned()
884 return -EINVAL; in xe_bo_evict_pinned()
887 return -EINVAL; in xe_bo_evict_pinned()
892 ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx); in xe_bo_evict_pinned()
896 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
897 bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); in xe_bo_evict_pinned()
898 if (!bo->ttm.ttm) { in xe_bo_evict_pinned()
899 ret = -ENOMEM; in xe_bo_evict_pinned()
904 ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx); in xe_bo_evict_pinned()
908 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_evict_pinned()
912 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_evict_pinned()
919 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_evict_pinned()
924 * xe_bo_restore_pinned() - Restore a pinned VRAM object
930 * suspend-resume.
940 struct ttm_place *place = &bo->placements[0]; in xe_bo_restore_pinned()
945 if (WARN_ON(!bo->ttm.resource)) in xe_bo_restore_pinned()
946 return -EINVAL; in xe_bo_restore_pinned()
949 return -EINVAL; in xe_bo_restore_pinned()
952 return -EINVAL; in xe_bo_restore_pinned()
954 if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo))) in xe_bo_restore_pinned()
955 return -EINVAL; in xe_bo_restore_pinned()
957 if (!mem_type_is_vram(place->mem_type)) in xe_bo_restore_pinned()
960 ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx); in xe_bo_restore_pinned()
964 ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx); in xe_bo_restore_pinned()
968 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_restore_pinned()
972 ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); in xe_bo_restore_pinned()
979 ttm_resource_free(&bo->ttm, &new_mem); in xe_bo_restore_pinned()
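A hypothetical caller-side sketch of the suspend/resume pair above (simplified; error handling and the real call sites are omitted), using the xe_bo_lock()/xe_bo_unlock() helpers documented later in this listing:

	/* suspend: copy pinned VRAM contents out to system memory */
	xe_bo_lock(bo, false);
	ret = xe_bo_evict_pinned(bo);
	xe_bo_unlock(bo);

	/* resume: move the BO back to VRAM and restore its contents */
	xe_bo_lock(bo, false);
	ret = xe_bo_restore_pinned(bo);
	xe_bo_unlock(bo);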
990 if (ttm_bo->resource->mem_type == XE_PL_STOLEN) in xe_ttm_io_mem_pfn()
993 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_io_mem_pfn()
994 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
995 return (vram->io_start + cursor.start) >> PAGE_SHIFT; in xe_ttm_io_mem_pfn()
1006 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor()
1009 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1017 spin_lock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1018 locked = dma_resv_trylock(ttm_bo->base.resv); in xe_ttm_bo_lock_in_destructor()
1019 spin_unlock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1036 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount))); in xe_ttm_bo_release_notify()
1042 if (ttm_bo->base.resv != &ttm_bo->base._resv) in xe_ttm_bo_release_notify()
1054 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1061 dma_resv_replace_fences(ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1062 fence->context, in xe_ttm_bo_release_notify()
1069 dma_resv_unlock(ttm_bo->base.resv); in xe_ttm_bo_release_notify()
1079 * dma-buf attachment. in xe_ttm_bo_delete_mem_notify()
1081 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { in xe_ttm_bo_delete_mem_notify()
1082 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, in xe_ttm_bo_delete_mem_notify()
1085 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, in xe_ttm_bo_delete_mem_notify()
1087 ttm_bo->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1088 xe_tt->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1109 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy()
1111 if (bo->ttm.base.import_attach) in xe_ttm_bo_destroy()
1112 drm_prime_gem_destroy(&bo->ttm.base, NULL); in xe_ttm_bo_destroy()
1113 drm_gem_object_release(&bo->ttm.base); in xe_ttm_bo_destroy()
1115 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1117 if (bo->ggtt_node && bo->ggtt_node->base.size) in xe_ttm_bo_destroy()
1118 xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo); in xe_ttm_bo_destroy()
1121 if (bo->client) in xe_ttm_bo_destroy()
1125 if (bo->vm && xe_bo_is_user(bo)) in xe_ttm_bo_destroy()
1126 xe_vm_put(bo->vm); in xe_ttm_bo_destroy()
1128 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1129 if (!list_empty(&bo->vram_userfault_link)) in xe_ttm_bo_destroy()
1130 list_del(&bo->vram_userfault_link); in xe_ttm_bo_destroy()
1131 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1160 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { in xe_gem_object_close()
1164 ttm_bo_set_bulk_move(&bo->ttm, NULL); in xe_gem_object_close()
1171 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; in xe_gem_fault()
1172 struct drm_device *ddev = tbo->base.dev; in xe_gem_fault()
1175 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK; in xe_gem_fault()
1189 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in xe_gem_fault()
1193 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in xe_gem_fault()
1196 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in xe_gem_fault()
1201 if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) { in xe_gem_fault()
1202 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1203 if (list_empty(&bo->vram_userfault_link)) in xe_gem_fault()
1204 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1205 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1208 dma_resv_unlock(tbo->base.resv); in xe_gem_fault()
1232 * xe_bo_alloc - Allocate storage for a struct xe_bo
1242 * ERR_PTR(-ENOMEM) on error.
1249 return ERR_PTR(-ENOMEM); in xe_bo_alloc()
1255 * xe_bo_free - Free storage allocated using xe_bo_alloc()
1258 * Refer to xe_bo_alloc() documentation for valid use-cases.
1285 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1290 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1306 return ERR_PTR(-EINVAL); in ___xe_bo_create_locked()
1314 bo->ccs_cleared = false; in ___xe_bo_create_locked()
1315 bo->tile = tile; in ___xe_bo_create_locked()
1316 bo->size = size; in ___xe_bo_create_locked()
1317 bo->flags = flags; in ___xe_bo_create_locked()
1318 bo->cpu_caching = cpu_caching; in ___xe_bo_create_locked()
1319 bo->ttm.base.funcs = &xe_gem_object_funcs; in ___xe_bo_create_locked()
1320 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; in ___xe_bo_create_locked()
1321 INIT_LIST_HEAD(&bo->pinned_link); in ___xe_bo_create_locked()
1323 INIT_LIST_HEAD(&bo->client_link); in ___xe_bo_create_locked()
1325 INIT_LIST_HEAD(&bo->vram_userfault_link); in ___xe_bo_create_locked()
1327 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1335 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1337 xe_ttm_bo_destroy(&bo->ttm); in ___xe_bo_create_locked()
1344 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement : in ___xe_bo_create_locked()
1345 &bo->placement; in ___xe_bo_create_locked()
1346 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1356 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. in ___xe_bo_create_locked()
1370 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in ___xe_bo_create_locked()
1377 dma_resv_unlock(bo->ttm.base.resv); in ___xe_bo_create_locked()
1383 bo->created = true; in ___xe_bo_create_locked()
1385 ttm_bo_set_bulk_move(&bo->ttm, bulk); in ___xe_bo_create_locked()
1387 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in ___xe_bo_create_locked()
1397 struct ttm_place *place = bo->placements; in __xe_bo_fixed_placement()
1400 return -EINVAL; in __xe_bo_fixed_placement()
1402 place->flags = TTM_PL_FLAG_CONTIGUOUS; in __xe_bo_fixed_placement()
1403 place->fpfn = start >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1404 place->lpfn = end >> PAGE_SHIFT; in __xe_bo_fixed_placement()
1408 place->mem_type = XE_PL_VRAM0; in __xe_bo_fixed_placement()
1411 place->mem_type = XE_PL_VRAM1; in __xe_bo_fixed_placement()
1414 place->mem_type = XE_PL_STOLEN; in __xe_bo_fixed_placement()
1419 return -EINVAL; in __xe_bo_fixed_placement()
1422 bo->placement = (struct ttm_placement) { in __xe_bo_fixed_placement()
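Worked example for the conversion above: with 4 KiB pages, a fixed window of start = 0x100000 and end = 0x200000 gives place->fpfn = 0x100000 >> PAGE_SHIFT = 256 and place->lpfn = 0x200000 >> PAGE_SHIFT = 512, i.e. the BO must be allocated contiguously within that 1 MiB range of the selected memory type.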
1458 &vm->lru_bulk_move : NULL, size, in __xe_bo_create_locked()
1472 bo->vm = vm; in __xe_bo_create_locked()
1474 if (bo->flags & XE_BO_FLAG_GGTT) { in __xe_bo_create_locked()
1481 err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, in __xe_bo_create_locked()
1482 start + bo->size, U64_MAX); in __xe_bo_create_locked()
1484 err = xe_ggtt_insert_bo(tile->mem.ggtt, bo); in __xe_bo_create_locked()
1597 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
1617 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
1632 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
1655 dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE; in xe_managed_bo_reinit_in_vram()
1658 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
1660 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
1661 (*src)->size, dst_flags); in xe_managed_bo_reinit_in_vram()
1665 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
1677 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset()
1679 if (res->mem_type == XE_PL_STOLEN) in vram_region_gpu_offset()
1682 return res_to_mem_region(res)->dpa_base; in vram_region_gpu_offset()
1686 * xe_bo_pin_external - pin an external BO
1689 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1700 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
1709 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
1710 list_add_tail(&bo->pinned_link, in xe_bo_pin_external()
1711 &xe->pinned.external_vram); in xe_bo_pin_external()
1712 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
1716 ttm_bo_pin(&bo->ttm); in xe_bo_pin_external()
1722 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin_external()
1729 struct ttm_place *place = &bo->placements[0]; in xe_bo_pin()
1737 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
1741 * No reason we can't support pinning imported dma-bufs we just don't in xe_bo_pin()
1742 * expect to pin an imported dma-buf. in xe_bo_pin()
1744 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
1759 bo->flags & XE_BO_FLAG_INTERNAL_TEST)) { in xe_bo_pin()
1760 if (mem_type_is_vram(place->mem_type)) { in xe_bo_pin()
1761 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
1763 place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - in xe_bo_pin()
1764 vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; in xe_bo_pin()
1765 place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT); in xe_bo_pin()
1769 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_pin()
1770 spin_lock(&xe->pinned.lock); in xe_bo_pin()
1771 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
1772 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
1775 ttm_bo_pin(&bo->ttm); in xe_bo_pin()
1781 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin()
1787 * xe_bo_unpin_external - unpin an external BO
1790 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
1800 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
1804 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
1805 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) in xe_bo_unpin_external()
1806 list_del_init(&bo->pinned_link); in xe_bo_unpin_external()
1807 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
1809 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin_external()
1815 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_unpin_external()
1820 struct ttm_place *place = &bo->placements[0]; in xe_bo_unpin()
1823 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
1826 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_unpin()
1827 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
1828 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
1829 list_del_init(&bo->pinned_link); in xe_bo_unpin()
1830 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
1832 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin()
1836 * xe_bo_validate() - Make sure the bo is in an allowed placement
1849 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
1859 lockdep_assert_held(&vm->lock); in xe_bo_validate()
1866 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); in xe_bo_validate()
1871 if (bo->destroy == &xe_ttm_bo_destroy) in xe_bo_is_xe_bo()
1891 offset &= (PAGE_SIZE - 1); in __xe_bo_addr()
1894 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
1902 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, in __xe_bo_addr()
1904 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); in __xe_bo_addr()
1910 if (!READ_ONCE(bo->ttm.pin_count)) in xe_bo_addr()
1923 if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) in xe_bo_vmap()
1924 return -EINVAL; in xe_bo_vmap()
1926 if (!iosys_map_is_null(&bo->vmap)) in xe_bo_vmap()
1936 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
1940 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); in xe_bo_vmap()
1942 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); in xe_bo_vmap()
1944 iosys_map_set_vaddr(&bo->vmap, virtual); in xe_bo_vmap()
1951 if (!iosys_map_is_null(&bo->vmap)) { in __xe_bo_vunmap()
1952 iosys_map_clear(&bo->vmap); in __xe_bo_vunmap()
1953 ttm_bo_kunmap(&bo->kmap); in __xe_bo_vunmap()
1975 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_create_ioctl()
1976 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
1977 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
1978 return -EINVAL; in xe_gem_create_ioctl()
1981 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
1982 !args->placement)) in xe_gem_create_ioctl()
1983 return -EINVAL; in xe_gem_create_ioctl()
1985 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
1989 return -EINVAL; in xe_gem_create_ioctl()
1991 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
1992 return -EINVAL; in xe_gem_create_ioctl()
1994 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
1995 return -EINVAL; in xe_gem_create_ioctl()
1997 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
1998 return -EINVAL; in xe_gem_create_ioctl()
2000 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2001 return -EINVAL; in xe_gem_create_ioctl()
2004 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) in xe_gem_create_ioctl()
2007 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) in xe_gem_create_ioctl()
2010 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1); in xe_gem_create_ioctl()
2015 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2016 IS_ALIGNED(args->size, SZ_64K)) in xe_gem_create_ioctl()
2019 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) { in xe_gem_create_ioctl()
2021 return -EINVAL; in xe_gem_create_ioctl()
2026 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2027 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
2028 return -EINVAL; in xe_gem_create_ioctl()
2031 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
2032 return -EINVAL; in xe_gem_create_ioctl()
2035 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) in xe_gem_create_ioctl()
2036 return -EINVAL; in xe_gem_create_ioctl()
2038 if (args->vm_id) { in xe_gem_create_ioctl()
2039 vm = xe_vm_lookup(xef, args->vm_id); in xe_gem_create_ioctl()
2041 return -ENOENT; in xe_gem_create_ioctl()
2047 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2058 err = drm_gem_handle_create(file, &bo->ttm.base, &handle); in xe_gem_create_ioctl()
2062 args->handle = handle; in xe_gem_create_ioctl()
2087 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2088 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2089 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2091 if (XE_IOCTL_DBG(xe, args->flags)) in xe_gem_mmap_offset_ioctl()
2092 return -EINVAL; in xe_gem_mmap_offset_ioctl()
2094 gem_obj = drm_gem_object_lookup(file, args->handle); in xe_gem_mmap_offset_ioctl()
2096 return -ENOENT; in xe_gem_mmap_offset_ioctl()
2099 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); in xe_gem_mmap_offset_ioctl()
2106 * xe_bo_lock() - Lock the buffer object's dma_resv object
2113 * Return: 0 on success, -EINTR if @intr is true and the wait for a
2120 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL); in xe_bo_lock()
2122 dma_resv_lock(bo->ttm.base.resv, NULL); in xe_bo_lock()
2128 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
2135 dma_resv_unlock(bo->ttm.base.resv); in xe_bo_unlock()
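A typical (illustrative) pairing of the locking helpers above with xe_bo_validate(), which appears earlier in this listing:

	err = xe_bo_lock(bo, true);	/* interruptible */
	if (err)
		return err;

	err = xe_bo_validate(bo, NULL, false);

	xe_bo_unlock(bo);
	return err;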
2139 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
2157 if (bo->ttm.type == ttm_bo_type_kernel) in xe_bo_can_migrate()
2160 if (bo->ttm.type == ttm_bo_type_sg) in xe_bo_can_migrate()
2163 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
2165 if (bo->placements[cur_place].mem_type == mem_type) in xe_bo_can_migrate()
2175 place->mem_type = mem_type; in xe_place_from_ttm_type()
2179 * xe_bo_migrate - Migrate an object to the desired region id
2191 * return -EINTR or -ERESTARTSYS if signal pending.
2195 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate()
2205 if (bo->ttm.resource->mem_type == mem_type) in xe_bo_migrate()
2209 return -EBUSY; in xe_bo_migrate()
2212 return -EINVAL; in xe_bo_migrate()
2222 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2227 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2230 return ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_migrate()
2234 * xe_bo_evict - Evict an object to evict placement
2253 xe_evict_flags(&bo->ttm, &placement); in xe_bo_evict()
2254 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_evict()
2258 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_evict()
2265 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
2278 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2284 * non-VRAM addresses. in xe_bo_needs_ccs_pages()
2286 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
2293 * __xe_bo_release_dummy() - Dummy kref release function
2303 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
2323 drm_gem_object_free(&bo->ttm.base.refcount); in xe_bo_put_commit()
2331 if (bo->client) in xe_bo_put()
2332 might_lock(&bo->client->bos_lock); in xe_bo_put()
2334 if (bo->ggtt_node && bo->ggtt_node->ggtt) in xe_bo_put()
2335 might_lock(&bo->ggtt_node->ggtt->lock); in xe_bo_put()
2336 drm_gem_object_put(&bo->ttm.base); in xe_bo_put()
2341 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
2357 int cpp = DIV_ROUND_UP(args->bpp, 8); in xe_bo_dumb_create()
2360 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2362 args->pitch = ALIGN(args->width * cpp, 64); in xe_bo_dumb_create()
2363 args->size = ALIGN(mul_u32_u32(args->pitch, args->height), in xe_bo_dumb_create()
2366 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2374 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle); in xe_bo_dumb_create()
2375 /* drop reference from allocate - handle holds it now */ in xe_bo_dumb_create()
2376 drm_gem_object_put(&bo->ttm.base); in xe_bo_dumb_create()
2378 args->handle = handle; in xe_bo_dumb_create()
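Worked example for the size computation above: a 1920x1080 dumb buffer at bpp = 32 gives cpp = 4, pitch = ALIGN(1920 * 4, 64) = 7680 bytes, and pitch * height = 8294400 bytes, which is already a multiple of 4 KiB; on platforms with XE_VRAM_FLAGS_NEED64K the size is instead rounded up to the next 64 KiB multiple, 8323072 bytes.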
2384 struct ttm_buffer_object *tbo = &bo->ttm; in xe_bo_runtime_pm_release_mmap_offset()
2385 struct ttm_device *bdev = tbo->bdev; in xe_bo_runtime_pm_release_mmap_offset()
2387 drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping); in xe_bo_runtime_pm_release_mmap_offset()
2389 list_del_init(&bo->vram_userfault_link); in xe_bo_runtime_pm_release_mmap_offset()