1 // SPDX-License-Identifier: MIT
8 #include <linux/dma-buf.h>
22 #include <trace/events/gpu_mem.h>
91 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
96 return mem_type_is_vram(res->mem_type); in resource_is_vram()
101 return resource_is_vram(bo->ttm.resource) || in xe_bo_is_vram()
102 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); in xe_bo_is_vram()
107 return bo->ttm.resource->mem_type == XE_PL_STOLEN; in xe_bo_is_stolen()
111 * xe_bo_has_single_placement - check if BO is placed only in one memory location
121 return bo->placement.num_placement == 1; in xe_bo_has_single_placement()
125 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
140 * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
152 return !list_empty(&bo->ttm.base.gpuva.list); in xe_bo_is_vm_bound()
157 return bo->flags & XE_BO_FLAG_USER; in xe_bo_is_user()
166 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
167 return tile->migrate; in mem_type_to_migrate()
172 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region()
177 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
187 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
189 bo->placements[*c] = (struct ttm_place) { in try_add_system()
220 struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type); in add_vram()
226 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
229 xe_assert(xe, vram && vram->usable_size); in add_vram()
230 io_size = vram->io_size; in add_vram()
235 if (io_size < vram->usable_size) { in add_vram()
251 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
253 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
260 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
262 bo->placements[*c] = (struct ttm_place) { in try_add_stolen()
281 return -EINVAL; in __xe_bo_placement_for_flags()
283 bo->placement = (struct ttm_placement) { in __xe_bo_placement_for_flags()
285 .placement = bo->placements, in __xe_bo_placement_for_flags()
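
A minimal sketch of the placement-building pattern used above: fill a fixed array of struct ttm_place entries and point a struct ttm_placement at it. Field names follow current TTM headers; the helper name is illustrative and the XE_PL_* values and driver flag choices are omitted.

#include <drm/ttm/ttm_placement.h>

/* Fill one entry per allowed memory type, then describe the set to TTM. */
static void example_build_placement(struct ttm_place *places, u32 *count,
				    struct ttm_placement *placement)
{
	places[(*count)++] = (struct ttm_place) {
		.mem_type = TTM_PL_SYSTEM,	/* e.g. a system-memory fallback */
	};

	*placement = (struct ttm_placement) {
		.num_placement = *count,
		.placement = places,
	};
}
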
301 struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm); in xe_evict_flags()
302 bool device_unplugged = drm_dev_is_unplugged(&xe->drm); in xe_evict_flags()
307 if (tbo->type == ttm_bo_type_sg) { in xe_evict_flags()
308 placement->num_placement = 0; in xe_evict_flags()
317 if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) { in xe_evict_flags()
322 if (device_unplugged && !tbo->base.dma_buf) { in xe_evict_flags()
331 switch (tbo->resource->mem_type) { in xe_evict_flags()
344 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */
356 unsigned long num_pages = tt->num_pages; in xe_tt_map_sg()
359 XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && in xe_tt_map_sg()
360 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)); in xe_tt_map_sg()
362 if (xe_tt->sg) in xe_tt_map_sg()
365 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, in xe_tt_map_sg()
368 xe_sg_segment_size(xe->drm.dev), in xe_tt_map_sg()
373 xe_tt->sg = &xe_tt->sgt; in xe_tt_map_sg()
374 ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
377 sg_free_table(xe_tt->sg); in xe_tt_map_sg()
378 xe_tt->sg = NULL; in xe_tt_map_sg()
389 if (xe_tt->sg) { in xe_tt_unmap_sg()
390 dma_unmap_sgtable(xe->drm.dev, xe_tt->sg, in xe_tt_unmap_sg()
392 sg_free_table(xe_tt->sg); in xe_tt_unmap_sg()
393 xe_tt->sg = NULL; in xe_tt_unmap_sg()
399 struct ttm_tt *tt = bo->ttm.ttm; in xe_bo_sg()
402 return xe_tt->sg; in xe_bo_sg()
413 if (xe_tt->purgeable) in xe_ttm_tt_account_add()
414 xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages); in xe_ttm_tt_account_add()
416 xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0); in xe_ttm_tt_account_add()
423 if (xe_tt->purgeable) in xe_ttm_tt_account_subtract()
424 xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages); in xe_ttm_tt_account_subtract()
426 xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0); in xe_ttm_tt_account_subtract()
435 atomic64_add_return(num_pages, &xe->global_total_pages); in update_global_total_pages()
437 trace_gpu_mem_total(xe->drm.primary->index, 0, in update_global_total_pages()
457 tt = &xe_tt->ttm; in xe_ttm_tt_create()
471 switch (bo->cpu_caching) { in xe_ttm_tt_create()
480 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); in xe_ttm_tt_create()
483 * Display scanout is always non-coherent with the CPU cache. in xe_ttm_tt_create()
486 * non-coherent and require a CPU:WC mapping. in xe_ttm_tt_create()
488 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || in xe_ttm_tt_create()
489 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
490 bo->flags & XE_BO_FLAG_PAGETABLE)) in xe_ttm_tt_create()
494 if (bo->flags & XE_BO_FLAG_NEEDS_UC) { in xe_ttm_tt_create()
496 * Valid only for internally-created buffers, for in xe_ttm_tt_create()
499 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
503 if (ttm_bo->type != ttm_bo_type_sg) in xe_ttm_tt_create()
506 err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages); in xe_ttm_tt_create()
512 if (ttm_bo->type != ttm_bo_type_sg) { in xe_ttm_tt_create()
531 * dma-bufs are not populated with pages, and the dma- in xe_ttm_tt_populate()
534 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && in xe_ttm_tt_populate()
535 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) in xe_ttm_tt_populate()
538 if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) { in xe_ttm_tt_populate()
542 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); in xe_ttm_tt_populate()
547 xe_tt->purgeable = false; in xe_ttm_tt_populate()
549 update_global_total_pages(ttm_dev, tt->num_pages); in xe_ttm_tt_populate()
558 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && in xe_ttm_tt_unpopulate()
559 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) in xe_ttm_tt_unpopulate()
564 ttm_pool_free(&ttm_dev->pool, tt); in xe_ttm_tt_unpopulate()
566 update_global_total_pages(ttm_dev, -(long)tt->num_pages); in xe_ttm_tt_unpopulate()
580 return vres->used_visible_size == mem->size; in xe_ttm_resource_visible()
588 switch (mem->mem_type) { in xe_ttm_io_mem_reserve()
597 return -EINVAL; in xe_ttm_io_mem_reserve()
599 mem->bus.offset = mem->start << PAGE_SHIFT; in xe_ttm_io_mem_reserve()
601 if (vram->mapping && in xe_ttm_io_mem_reserve()
602 mem->placement & TTM_PL_FLAG_CONTIGUOUS) in xe_ttm_io_mem_reserve()
603 mem->bus.addr = (u8 __force *)vram->mapping + in xe_ttm_io_mem_reserve()
604 mem->bus.offset; in xe_ttm_io_mem_reserve()
606 mem->bus.offset += vram->io_start; in xe_ttm_io_mem_reserve()
607 mem->bus.is_iomem = true; in xe_ttm_io_mem_reserve()
610 mem->bus.caching = ttm_write_combined; in xe_ttm_io_mem_reserve()
616 return -EINVAL; in xe_ttm_io_mem_reserve()
625 struct drm_gem_object *obj = &bo->ttm.base; in xe_bo_trigger_rebind()
630 dma_resv_assert_held(bo->ttm.base.resv); in xe_bo_trigger_rebind()
632 if (!list_empty(&bo->ttm.base.gpuva.list)) { in xe_bo_trigger_rebind()
633 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, in xe_bo_trigger_rebind()
641 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_bo_trigger_rebind()
652 if (ctx->no_wait_gpu && in xe_bo_trigger_rebind()
653 !dma_resv_test_signaled(bo->ttm.base.resv, in xe_bo_trigger_rebind()
655 return -EBUSY; in xe_bo_trigger_rebind()
657 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in xe_bo_trigger_rebind()
659 ctx->interruptible, in xe_bo_trigger_rebind()
662 return -ETIME; in xe_bo_trigger_rebind()
683 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
694 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; in xe_bo_move_dmabuf()
695 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, in xe_bo_move_dmabuf()
697 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf()
698 bool device_unplugged = drm_dev_is_unplugged(&xe->drm); in xe_bo_move_dmabuf()
702 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
704 if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM && in xe_bo_move_dmabuf()
705 ttm_bo->sg) { in xe_bo_move_dmabuf()
706 dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, in xe_bo_move_dmabuf()
708 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); in xe_bo_move_dmabuf()
709 ttm_bo->sg = NULL; in xe_bo_move_dmabuf()
712 if (new_res->mem_type == XE_PL_SYSTEM) in xe_bo_move_dmabuf()
715 if (ttm_bo->sg) { in xe_bo_move_dmabuf()
716 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); in xe_bo_move_dmabuf()
717 ttm_bo->sg = NULL; in xe_bo_move_dmabuf()
724 ttm_bo->sg = sg; in xe_bo_move_dmabuf()
725 xe_tt->sg = sg; in xe_bo_move_dmabuf()
734 * xe_bo_move_notify - Notify subsystems of a pending move
748 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
754 struct ttm_buffer_object *ttm_bo = &bo->ttm; in xe_bo_move_notify()
755 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify()
756 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move_notify()
757 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move_notify()
766 return -EINVAL; in xe_bo_move_notify()
773 /* Don't call move_notify() for imported dma-bufs. */ in xe_bo_move_notify()
774 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) in xe_bo_move_notify()
775 dma_buf_move_notify(ttm_bo->base.dma_buf); in xe_bo_move_notify()
783 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
784 if (!list_empty(&bo->vram_userfault_link)) in xe_bo_move_notify()
785 list_del_init(&bo->vram_userfault_link); in xe_bo_move_notify()
786 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
797 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move()
799 struct ttm_resource *old_mem = ttm_bo->resource; in xe_bo_move()
800 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; in xe_bo_move()
801 struct ttm_tt *ttm = ttm_bo->ttm; in xe_bo_move()
813 if (new_mem->mem_type == XE_PL_TT) in xe_bo_move()
820 if (ttm_bo->type == ttm_bo_type_sg) { in xe_bo_move()
821 if (new_mem->mem_type == XE_PL_SYSTEM) in xe_bo_move()
830 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : in xe_bo_move()
833 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || in xe_bo_move()
834 (!ttm && ttm_bo->type == ttm_bo_type_device); in xe_bo_move()
836 if (new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
847 if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) && in xe_bo_move()
848 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
851 drm_dbg(&xe->drm, "Evict system allocator BO success\n"); in xe_bo_move()
854 drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n", in xe_bo_move()
861 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { in xe_bo_move()
867 * Failed multi-hop where the old_mem is still marked as in xe_bo_move()
871 new_mem->mem_type == XE_PL_TT) { in xe_bo_move()
883 new_mem->mem_type == XE_PL_SYSTEM) { in xe_bo_move()
884 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
902 new_mem->mem_type == XE_PL_SYSTEM))) { in xe_bo_move()
903 hop->fpfn = 0; in xe_bo_move()
904 hop->lpfn = 0; in xe_bo_move()
905 hop->mem_type = XE_PL_TT; in xe_bo_move()
906 hop->flags = TTM_PL_FLAG_TEMPORARY; in xe_bo_move()
907 ret = -EMULTIHOP; in xe_bo_move()
911 if (bo->tile) in xe_bo_move()
912 migrate = bo->tile->migrate; in xe_bo_move()
914 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
918 migrate = xe->tiles[0].migrate; in xe_bo_move()
921 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); in xe_bo_move()
929 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
936 if (mem_type_is_vram(new_mem->mem_type)) in xe_bo_move()
962 * bo->resource == NULL, so just attach the in xe_bo_move()
965 dma_resv_add_fence(ttm_bo->base.resv, fence, in xe_bo_move()
974 * CCS meta data is migrated from TT -> SMEM. So, let us detach the in xe_bo_move()
978 new_mem->mem_type == XE_PL_SYSTEM) in xe_bo_move()
982 ((move_lacks_source && new_mem->mem_type == XE_PL_TT) || in xe_bo_move()
983 (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) && in xe_bo_move()
988 if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) && in xe_bo_move()
989 ttm_bo->ttm) { in xe_bo_move()
990 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, in xe_bo_move()
1000 xe_tt_unmap_sg(xe, ttm_bo->ttm); in xe_bo_move()
1010 struct xe_device *xe = ttm_to_xe_device(bo->bdev); in xe_bo_shrink_purge()
1014 if (bo->resource->mem_type != XE_PL_SYSTEM) { in xe_bo_shrink_purge()
1025 xe_tt_unmap_sg(xe, bo->ttm); in xe_bo_shrink_purge()
1029 *scanned += bo->ttm->num_pages; in xe_bo_shrink_purge()
1036 xe_ttm_tt_account_subtract(xe, bo->ttm); in xe_bo_shrink_purge()
1052 drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) { in xe_bo_eviction_valuable()
1053 if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm))) in xe_bo_eviction_valuable()
1061 * xe_bo_shrink() - Try to shrink an xe bo.
1068 * Try to shrink or purge a bo, and if it succeeds, unmap dma.
1070 * (ghost bos), but only if the struct ttm_tt is embedded in
1082 struct ttm_tt *tt = bo->ttm; in xe_bo_shrink()
1084 struct ttm_place place = {.mem_type = bo->resource->mem_type}; in xe_bo_shrink()
1086 struct xe_device *xe = ttm_to_xe_device(bo->bdev); in xe_bo_shrink()
1090 if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) || in xe_bo_shrink()
1091 (flags.purge && !xe_tt->purgeable)) in xe_bo_shrink()
1092 return -EBUSY; in xe_bo_shrink()
1095 return -EBUSY; in xe_bo_shrink()
1100 if (xe_tt->purgeable) { in xe_bo_shrink()
1101 if (bo->resource->mem_type != XE_PL_SYSTEM) in xe_bo_shrink()
1108 /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */ in xe_bo_shrink()
1109 needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && in xe_bo_shrink()
1114 *scanned += tt->num_pages; in xe_bo_shrink()
1132 * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
1143 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_notifier_prepare_pinned()
1149 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) { in xe_bo_notifier_prepare_pinned()
1150 ret = drm_exec_lock_obj(&exec, &bo->ttm.base); in xe_bo_notifier_prepare_pinned()
1153 xe_assert(xe, !bo->backup_obj); in xe_bo_notifier_prepare_pinned()
1166 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) in xe_bo_notifier_prepare_pinned()
1169 backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), in xe_bo_notifier_prepare_pinned()
1180 backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ in xe_bo_notifier_prepare_pinned()
1181 ttm_bo_pin(&backup->ttm); in xe_bo_notifier_prepare_pinned()
1182 bo->backup_obj = backup; in xe_bo_notifier_prepare_pinned()
1189 * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
1200 if (bo->backup_obj) { in xe_bo_notifier_unprepare_pinned()
1201 ttm_bo_unpin(&bo->backup_obj->ttm); in xe_bo_notifier_unprepare_pinned()
1202 xe_bo_put(bo->backup_obj); in xe_bo_notifier_unprepare_pinned()
1203 bo->backup_obj = NULL; in xe_bo_notifier_unprepare_pinned()
1216 if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) { in xe_bo_evict_pinned_copy()
1220 if (bo->tile) in xe_bo_evict_pinned_copy()
1221 migrate = bo->tile->migrate; in xe_bo_evict_pinned_copy()
1223 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); in xe_bo_evict_pinned_copy()
1225 xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv); in xe_bo_evict_pinned_copy()
1226 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_evict_pinned_copy()
1230 fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource, in xe_bo_evict_pinned_copy()
1231 backup->ttm.resource, false); in xe_bo_evict_pinned_copy()
1237 dma_resv_add_fence(bo->ttm.base.resv, fence, in xe_bo_evict_pinned_copy()
1245 if (iosys_map_is_null(&bo->vmap)) { in xe_bo_evict_pinned_copy()
1252 xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0, in xe_bo_evict_pinned_copy()
1256 if (!bo->backup_obj) in xe_bo_evict_pinned_copy()
1257 bo->backup_obj = backup; in xe_bo_evict_pinned_copy()
1268 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1274 * suspend-resume.
1280 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_evict_pinned()
1283 struct xe_bo *backup = bo->backup_obj; in xe_bo_evict_pinned()
1287 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) { in xe_bo_evict_pinned()
1288 ret = drm_exec_lock_obj(&exec, &bo->ttm.base); in xe_bo_evict_pinned()
1292 if (WARN_ON(!bo->ttm.resource)) { in xe_bo_evict_pinned()
1293 ret = -EINVAL; in xe_bo_evict_pinned()
1298 ret = -EINVAL; in xe_bo_evict_pinned()
1305 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) in xe_bo_evict_pinned()
1309 backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, in xe_bo_evict_pinned()
1320 backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ in xe_bo_evict_pinned()
1334 * xe_bo_restore_pinned() - Restore a pinned VRAM object
1340 * suspend-resume.
1350 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_restore_pinned()
1351 struct xe_bo *backup = bo->backup_obj; in xe_bo_restore_pinned()
1361 ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx); in xe_bo_restore_pinned()
1366 if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) { in xe_bo_restore_pinned()
1370 if (bo->tile) in xe_bo_restore_pinned()
1371 migrate = bo->tile->migrate; in xe_bo_restore_pinned()
1373 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); in xe_bo_restore_pinned()
1375 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); in xe_bo_restore_pinned()
1380 backup->ttm.resource, bo->ttm.resource, in xe_bo_restore_pinned()
1387 dma_resv_add_fence(bo->ttm.base.resv, fence, in xe_bo_restore_pinned()
1395 if (iosys_map_is_null(&bo->vmap)) { in xe_bo_restore_pinned()
1402 xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, in xe_bo_restore_pinned()
1406 bo->backup_obj = NULL; in xe_bo_restore_pinned()
1410 if (!bo->backup_obj) { in xe_bo_restore_pinned()
1412 ttm_bo_unpin(&backup->ttm); in xe_bo_restore_pinned()
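
The xe_bo_evict_pinned()/xe_bo_restore_pinned() kernel-doc above describes a suspend/resume pair for pinned VRAM objects. A hedged sketch of how a caller might drive it, assuming both helpers take only the bo and return 0 or a negative errno:

static int example_suspend_pinned_bo(struct xe_bo *bo)
{
	/* Copy the pinned VRAM contents into a system-memory backup object. */
	int err = xe_bo_evict_pinned(bo);

	if (err)
		return err;	/* abort suspend; VRAM contents would be lost */

	return 0;
}

static int example_resume_pinned_bo(struct xe_bo *bo)
{
	/* Copy the backup back into VRAM and release the backup object. */
	return xe_bo_restore_pinned(bo);
}
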
1424 struct ttm_buffer_object *ttm_bo = &bo->ttm; in xe_bo_dma_unmap_pinned()
1425 struct ttm_tt *tt = ttm_bo->ttm; in xe_bo_dma_unmap_pinned()
1430 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { in xe_bo_dma_unmap_pinned()
1431 dma_buf_unmap_attachment(ttm_bo->base.import_attach, in xe_bo_dma_unmap_pinned()
1432 ttm_bo->sg, in xe_bo_dma_unmap_pinned()
1434 ttm_bo->sg = NULL; in xe_bo_dma_unmap_pinned()
1435 xe_tt->sg = NULL; in xe_bo_dma_unmap_pinned()
1436 } else if (xe_tt->sg) { in xe_bo_dma_unmap_pinned()
1437 dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev, in xe_bo_dma_unmap_pinned()
1438 xe_tt->sg, in xe_bo_dma_unmap_pinned()
1440 sg_free_table(xe_tt->sg); in xe_bo_dma_unmap_pinned()
1441 xe_tt->sg = NULL; in xe_bo_dma_unmap_pinned()
1455 if (ttm_bo->resource->mem_type == XE_PL_STOLEN) in xe_ttm_io_mem_pfn()
1458 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_io_mem_pfn()
1459 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); in xe_ttm_io_mem_pfn()
1460 return (vram->io_start + cursor.start) >> PAGE_SHIFT; in xe_ttm_io_mem_pfn()
1471 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor()
1474 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1482 spin_lock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1483 locked = dma_resv_trylock(ttm_bo->base.resv); in xe_ttm_bo_lock_in_destructor()
1484 spin_unlock(&ttm_bo->bdev->lru_lock); in xe_ttm_bo_lock_in_destructor()
1501 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount))); in xe_ttm_bo_release_notify()
1507 if (ttm_bo->base.resv != &ttm_bo->base._resv) in xe_ttm_bo_release_notify()
1519 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1526 dma_resv_replace_fences(ttm_bo->base.resv, in xe_ttm_bo_release_notify()
1527 fence->context, in xe_ttm_bo_release_notify()
1534 dma_resv_unlock(ttm_bo->base.resv); in xe_ttm_bo_release_notify()
1544 if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev))) in xe_ttm_bo_delete_mem_notify()
1549 * dma-buf attachment. in xe_ttm_bo_delete_mem_notify()
1551 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { in xe_ttm_bo_delete_mem_notify()
1552 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, in xe_ttm_bo_delete_mem_notify()
1555 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, in xe_ttm_bo_delete_mem_notify()
1557 ttm_bo->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1558 xe_tt->sg = NULL; in xe_ttm_bo_delete_mem_notify()
1564 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_purge()
1566 if (ttm_bo->ttm) { in xe_ttm_bo_purge()
1570 drm_WARN_ON(&xe->drm, ret); in xe_ttm_bo_purge()
1581 if (ttm_bo->ttm) { in xe_ttm_bo_swap_notify()
1583 container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm); in xe_ttm_bo_swap_notify()
1585 if (xe_tt->purgeable) in xe_ttm_bo_swap_notify()
1595 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_access_memory()
1605 if (!mem_type_is_vram(ttm_bo->resource->mem_type)) in xe_ttm_access_memory()
1606 return -EIO; in xe_ttm_access_memory()
1608 if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) { in xe_ttm_access_memory()
1610 mem_type_to_migrate(xe, ttm_bo->resource->mem_type); in xe_ttm_access_memory()
1617 vram = res_to_mem_region(ttm_bo->resource); in xe_ttm_access_memory()
1618 xe_res_first(ttm_bo->resource, offset & PAGE_MASK, in xe_ttm_access_memory()
1619 xe_bo_size(bo) - (offset & PAGE_MASK), &cursor); in xe_ttm_access_memory()
1623 int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left); in xe_ttm_access_memory()
1625 iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping + in xe_ttm_access_memory()
1634 bytes_left -= byte_count; in xe_ttm_access_memory()
1662 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy()
1666 if (bo->ttm.base.import_attach) in xe_ttm_bo_destroy()
1667 drm_prime_gem_destroy(&bo->ttm.base, NULL); in xe_ttm_bo_destroy()
1668 drm_gem_object_release(&bo->ttm.base); in xe_ttm_bo_destroy()
1670 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1673 if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size) in xe_ttm_bo_destroy()
1674 xe_ggtt_remove_bo(tile->mem.ggtt, bo); in xe_ttm_bo_destroy()
1677 if (bo->client) in xe_ttm_bo_destroy()
1681 if (bo->vm && xe_bo_is_user(bo)) in xe_ttm_bo_destroy()
1682 xe_vm_put(bo->vm); in xe_ttm_bo_destroy()
1684 if (bo->parent_obj) in xe_ttm_bo_destroy()
1685 xe_bo_put(bo->parent_obj); in xe_ttm_bo_destroy()
1687 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1688 if (!list_empty(&bo->vram_userfault_link)) in xe_ttm_bo_destroy()
1689 list_del(&bo->vram_userfault_link); in xe_ttm_bo_destroy()
1690 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1719 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { in xe_gem_object_close()
1723 ttm_bo_set_bulk_move(&bo->ttm, NULL); in xe_gem_object_close()
1731 * NOTE: The following atomic checks are platform-specific. For example, in should_migrate_to_smem()
1736 return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL || in should_migrate_to_smem()
1737 bo->attr.atomic_access == DRM_XE_ATOMIC_CPU; in should_migrate_to_smem()
1744 if (ctx->no_wait_gpu) in xe_bo_wait_usage_kernel()
1745 return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ? in xe_bo_wait_usage_kernel()
1746 0 : -EBUSY; in xe_bo_wait_usage_kernel()
1748 lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_wait_usage_kernel()
1749 ctx->interruptible, MAX_SCHEDULE_TIMEOUT); in xe_bo_wait_usage_kernel()
1753 return -EBUSY; in xe_bo_wait_usage_kernel()
1762 struct ttm_buffer_object *tbo = &bo->ttm; in xe_bo_fault_migrate()
1765 if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) { in xe_bo_fault_migrate()
1768 err = ttm_bo_populate(&bo->ttm, ctx); in xe_bo_fault_migrate()
1770 xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM); in xe_bo_fault_migrate()
1784 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in __xe_bo_cpu_fault()
1793 mem_type_is_vram(bo->ttm.resource->mem_type)) { in __xe_bo_cpu_fault()
1794 mutex_lock(&xe->mem_access.vram_userfault.lock); in __xe_bo_cpu_fault()
1795 if (list_empty(&bo->vram_userfault_link)) in __xe_bo_cpu_fault()
1796 list_add(&bo->vram_userfault_link, in __xe_bo_cpu_fault()
1797 &xe->mem_access.vram_userfault.list); in __xe_bo_cpu_fault()
1798 mutex_unlock(&xe->mem_access.vram_userfault.lock); in __xe_bo_cpu_fault()
1808 case -EINTR: in xe_err_to_fault_t()
1809 case -ERESTARTSYS: in xe_err_to_fault_t()
1810 case -EAGAIN: in xe_err_to_fault_t()
1812 case -ENOMEM: in xe_err_to_fault_t()
1813 case -ENOSPC: in xe_err_to_fault_t()
1823 dma_resv_assert_held(tbo->base.resv); in xe_ttm_bo_is_imported()
1825 return tbo->ttm && in xe_ttm_bo_is_imported()
1826 (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) == in xe_ttm_bo_is_imported()
1833 struct ttm_buffer_object *tbo = &bo->ttm; in xe_bo_cpu_fault_fastpath()
1847 err = xe_validation_ctx_init(&ctx, &xe->val, NULL, in xe_bo_cpu_fault_fastpath()
1855 if (!dma_resv_trylock(tbo->base.resv)) in xe_bo_cpu_fault_fastpath()
1860 drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n"); in xe_bo_cpu_fault_fastpath()
1867 if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY) in xe_bo_cpu_fault_fastpath()
1872 if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) in xe_bo_cpu_fault_fastpath()
1876 dma_resv_unlock(tbo->base.resv); in xe_bo_cpu_fault_fastpath()
1888 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; in xe_bo_cpu_fault()
1889 struct drm_device *ddev = tbo->base.dev; in xe_bo_cpu_fault()
1892 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK; in xe_bo_cpu_fault()
1900 if (!drm_dev_enter(&xe->drm, &idx)) in xe_bo_cpu_fault()
1901 return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); in xe_bo_cpu_fault()
1907 if (fault_flag_allow_retry_first(vmf->flags)) { in xe_bo_cpu_fault()
1908 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in xe_bo_cpu_fault()
1912 mmap_read_unlock(vmf->vma->vm_mm); in xe_bo_cpu_fault()
1935 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, in xe_bo_cpu_fault()
1943 err = drm_exec_lock_obj(&exec, &tbo->base); in xe_bo_cpu_fault()
1949 err = -EFAULT; in xe_bo_cpu_fault()
1950 drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n"); in xe_bo_cpu_fault()
1986 struct ttm_buffer_object *ttm_bo = vma->vm_private_data; in xe_bo_vm_access()
1999 * xe_bo_read() - Read from an xe_bo
2013 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0); in xe_bo_read()
2015 ret = -EIO; in xe_bo_read()
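
A small usage sketch for xe_bo_read(); the (bo, offset, dst, size) ordering is inferred from the ttm_bo_access() call above and should be read as an assumption:

static int example_peek_u32(struct xe_bo *bo, u64 offset, u32 *value)
{
	/* Returns 0 on success, -EIO if fewer bytes than requested were read. */
	return xe_bo_read(bo, offset, value, sizeof(*value));
}
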
2038 * xe_bo_alloc - Allocate storage for a struct xe_bo
2048 * ERR_PTR(-ENOMEM) on error.
2055 return ERR_PTR(-ENOMEM); in xe_bo_alloc()
2061 * xe_bo_free - Free storage allocated using xe_bo_alloc()
2064 * Refer to xe_bo_alloc() documentation for valid use-cases.
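
Sketch of the allocation contract spelled out above: xe_bo_alloc() returns ERR_PTR(-ENOMEM) on failure, and storage that never reaches xe_bo_init_locked() must be released with xe_bo_free(). The helper name here is illustrative.

static struct xe_bo *example_reserve_bo_storage(void)
{
	struct xe_bo *bo = xe_bo_alloc();

	if (IS_ERR(bo))
		return bo;		/* ERR_PTR(-ENOMEM) */

	/*
	 * ... if a later step fails before the bo is initialized,
	 * undo the allocation with xe_bo_free(bo).
	 */
	return bo;
}
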
2072 * xe_bo_init_locked() - Initialize or create an xe_bo.
2077 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2113 return ERR_PTR(-EINVAL); in xe_bo_init_locked()
2118 return ERR_PTR(-EINVAL); in xe_bo_init_locked()
2122 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in xe_bo_init_locked()
2138 return ERR_PTR(-EINVAL); in xe_bo_init_locked()
2146 bo->ccs_cleared = false; in xe_bo_init_locked()
2147 bo->tile = tile; in xe_bo_init_locked()
2148 bo->flags = flags; in xe_bo_init_locked()
2149 bo->cpu_caching = cpu_caching; in xe_bo_init_locked()
2150 bo->ttm.base.funcs = &xe_gem_object_funcs; in xe_bo_init_locked()
2151 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; in xe_bo_init_locked()
2152 INIT_LIST_HEAD(&bo->pinned_link); in xe_bo_init_locked()
2154 INIT_LIST_HEAD(&bo->client_link); in xe_bo_init_locked()
2156 INIT_LIST_HEAD(&bo->vram_userfault_link); in xe_bo_init_locked()
2158 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in xe_bo_init_locked()
2165 xe_validation_assert_exec(xe, exec, &bo->ttm.base); in xe_bo_init_locked()
2167 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in xe_bo_init_locked()
2169 xe_ttm_bo_destroy(&bo->ttm); in xe_bo_init_locked()
2176 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement : in xe_bo_init_locked()
2177 &bo->placement; in xe_bo_init_locked()
2178 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in xe_bo_init_locked()
2188 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. in xe_bo_init_locked()
2202 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv, in xe_bo_init_locked()
2209 dma_resv_unlock(bo->ttm.base.resv); in xe_bo_init_locked()
2215 bo->created = true; in xe_bo_init_locked()
2217 ttm_bo_set_bulk_move(&bo->ttm, bulk); in xe_bo_init_locked()
2219 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_init_locked()
2229 struct ttm_place *place = bo->placements; in __xe_bo_fixed_placement()
2232 return -EINVAL; in __xe_bo_fixed_placement()
2234 place->flags = TTM_PL_FLAG_CONTIGUOUS; in __xe_bo_fixed_placement()
2235 place->fpfn = start >> PAGE_SHIFT; in __xe_bo_fixed_placement()
2236 place->lpfn = end >> PAGE_SHIFT; in __xe_bo_fixed_placement()
2240 place->mem_type = XE_PL_VRAM0; in __xe_bo_fixed_placement()
2243 place->mem_type = XE_PL_VRAM1; in __xe_bo_fixed_placement()
2246 place->mem_type = XE_PL_STOLEN; in __xe_bo_fixed_placement()
2251 return -EINVAL; in __xe_bo_fixed_placement()
2254 bo->placement = (struct ttm_placement) { in __xe_bo_fixed_placement()
2291 &vm->lru_bulk_move : NULL, size, in __xe_bo_create_locked()
2296 bo->min_align = alignment; in __xe_bo_create_locked()
2307 bo->vm = vm; in __xe_bo_create_locked()
2309 if (bo->flags & XE_BO_FLAG_GGTT) { in __xe_bo_create_locked()
2313 if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) { in __xe_bo_create_locked()
2321 if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t))) in __xe_bo_create_locked()
2325 err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, in __xe_bo_create_locked()
2329 err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec); in __xe_bo_create_locked()
2347 * xe_bo_create_locked() - Create a BO
2350 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2357 * Create a locked xe BO with no range or alignment restrictions.
2380 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr}, in xe_bo_create_novm()
2397 * xe_bo_create_user() - Create a user BO
2406 * Create a bo on behalf of user-space.
2435 * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
2438 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2445 * Create an Xe BO with range options. If @start and @end indicate
2460 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) { in xe_bo_create_pin_range_novm()
2526 * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
2529 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2541 * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2554 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr}, in xe_bo_create_pin_map_at_novm()
2569 * xe_bo_create_pin_map() - Create pinned and mapped bo
2574 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2585 * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
2598 * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
2601 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2611 * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2637 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
2646 devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_unpin_map_no_vm()
2657 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
2680 dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE | in xe_managed_bo_reinit_in_vram()
2684 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
2686 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
2691 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
2703 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset()
2705 switch (res->mem_type) { in vram_region_gpu_offset()
2712 return res_to_mem_region(res)->dpa_base; in vram_region_gpu_offset()
2718 * xe_bo_pin_external - pin an external BO
2723 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2734 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
2744 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
2745 list_add_tail(&bo->pinned_link, &xe->pinned.late.external); in xe_bo_pin_external()
2746 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
2749 ttm_bo_pin(&bo->ttm); in xe_bo_pin_external()
2750 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_pin_external()
2751 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); in xe_bo_pin_external()
2757 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin_external()
2763 * xe_bo_pin() - Pin a kernel bo after potentially migrating it
2767 * Attempts to migrate a bo to @bo->placement. If that succeeds,
2774 struct ttm_place *place = &bo->placements[0]; in xe_bo_pin()
2782 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
2786 * No reason we can't support pinning imported dma-bufs we just don't in xe_bo_pin()
2787 * expect to pin an imported dma-buf. in xe_bo_pin()
2789 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
2798 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_pin()
2799 spin_lock(&xe->pinned.lock); in xe_bo_pin()
2800 if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) in xe_bo_pin()
2801 list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present); in xe_bo_pin()
2803 list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present); in xe_bo_pin()
2804 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
2807 ttm_bo_pin(&bo->ttm); in xe_bo_pin()
2808 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_pin()
2809 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); in xe_bo_pin()
2815 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_pin()
2821 * xe_bo_unpin_external - unpin an external BO
2824 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2834 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
2838 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
2839 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) in xe_bo_unpin_external()
2840 list_del_init(&bo->pinned_link); in xe_bo_unpin_external()
2841 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
2843 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin_external()
2844 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_unpin_external()
2845 xe_ttm_tt_account_add(xe, bo->ttm.ttm); in xe_bo_unpin_external()
2851 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); in xe_bo_unpin_external()
2856 struct ttm_place *place = &bo->placements[0]; in xe_bo_unpin()
2859 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
2862 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { in xe_bo_unpin()
2863 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
2864 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
2865 list_del_init(&bo->pinned_link); in xe_bo_unpin()
2866 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
2868 if (bo->backup_obj) { in xe_bo_unpin()
2869 if (xe_bo_is_pinned(bo->backup_obj)) in xe_bo_unpin()
2870 ttm_bo_unpin(&bo->backup_obj->ttm); in xe_bo_unpin()
2871 xe_bo_put(bo->backup_obj); in xe_bo_unpin()
2872 bo->backup_obj = NULL; in xe_bo_unpin()
2875 ttm_bo_unpin(&bo->ttm); in xe_bo_unpin()
2876 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) in xe_bo_unpin()
2877 xe_ttm_tt_account_add(xe, bo->ttm.ttm); in xe_bo_unpin()
2881 * xe_bo_validate() - Make sure the bo is in an allowed placement
2895 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
2911 lockdep_assert_held(&vm->lock); in xe_bo_validate()
2920 xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base); in xe_bo_validate()
2921 ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); in xe_bo_validate()
2929 if (bo->destroy == &xe_ttm_bo_destroy) in xe_bo_is_xe_bo()
2949 offset &= (PAGE_SIZE - 1); in __xe_bo_addr()
2952 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
2960 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, in __xe_bo_addr()
2962 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); in __xe_bo_addr()
2968 if (!READ_ONCE(bo->ttm.pin_count)) in xe_bo_addr()
2975 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_vmap()
2982 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || in xe_bo_vmap()
2983 !force_contiguous(bo->flags))) in xe_bo_vmap()
2984 return -EINVAL; in xe_bo_vmap()
2986 if (!iosys_map_is_null(&bo->vmap)) in xe_bo_vmap()
2996 ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap); in xe_bo_vmap()
3000 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); in xe_bo_vmap()
3002 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); in xe_bo_vmap()
3004 iosys_map_set_vaddr(&bo->vmap, virtual); in xe_bo_vmap()
3011 if (!iosys_map_is_null(&bo->vmap)) { in __xe_bo_vunmap()
3012 iosys_map_clear(&bo->vmap); in __xe_bo_vunmap()
3013 ttm_bo_kunmap(&bo->kmap); in __xe_bo_vunmap()
3030 return -EINVAL; in gem_create_set_pxp_type()
3032 return xe_pxp_key_assign(xe->pxp, bo); in gem_create_set_pxp_type()
3045 u64 extension) in gem_create_user_ext_set_property() argument
3047 u64 __user *address = u64_to_user_ptr(extension); in gem_create_user_ext_set_property()
3054 return -EFAULT; in gem_create_user_ext_set_property()
3060 return -EINVAL; in gem_create_user_ext_set_property()
3064 return -EINVAL; in gem_create_user_ext_set_property()
3071 u64 extension);
3087 return -E2BIG; in gem_create_user_extensions()
3091 return -EFAULT; in gem_create_user_extensions()
3095 return -EINVAL; in gem_create_user_extensions()
3124 if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
3125 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
3126 return -EINVAL; in xe_gem_create_ioctl()
3129 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
3130 !args->placement)) in xe_gem_create_ioctl()
3131 return -EINVAL; in xe_gem_create_ioctl()
3133 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
3137 return -EINVAL; in xe_gem_create_ioctl()
3139 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
3140 return -EINVAL; in xe_gem_create_ioctl()
3142 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
3143 return -EINVAL; in xe_gem_create_ioctl()
3145 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
3146 return -EINVAL; in xe_gem_create_ioctl()
3148 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
3149 return -EINVAL; in xe_gem_create_ioctl()
3152 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) in xe_gem_create_ioctl()
3155 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) in xe_gem_create_ioctl()
3158 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1); in xe_gem_create_ioctl()
3163 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
3164 IS_ALIGNED(args->size, SZ_64K)) in xe_gem_create_ioctl()
3167 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) { in xe_gem_create_ioctl()
3169 return -EINVAL; in xe_gem_create_ioctl()
3174 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
3175 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
3176 return -EINVAL; in xe_gem_create_ioctl()
3179 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC)) in xe_gem_create_ioctl()
3180 return -EINVAL; in xe_gem_create_ioctl()
3183 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) in xe_gem_create_ioctl()
3184 return -EINVAL; in xe_gem_create_ioctl()
3186 if (args->vm_id) { in xe_gem_create_ioctl()
3187 vm = xe_vm_lookup(xef, args->vm_id); in xe_gem_create_ioctl()
3189 return -ENOENT; in xe_gem_create_ioctl()
3193 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, in xe_gem_create_ioctl()
3201 bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
3213 if (args->extensions) { in xe_gem_create_ioctl()
3214 err = gem_create_user_extensions(xe, bo, args->extensions, 0); in xe_gem_create_ioctl()
3219 err = drm_gem_handle_create(file, &bo->ttm.base, &handle); in xe_gem_create_ioctl()
3223 args->handle = handle; in xe_gem_create_ioctl()
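
A userspace sketch of a call that passes the ioctl checks above: a non-zero, page-aligned size, a placement mask restricted to regions reported by the memory-region query, and a cpu_caching value (WB is only accepted for system-memory-only placements). The drm_xe_gem_create field names come from the xe uapi header; the include paths and helper name are assumptions.

#include <errno.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <xe_drm.h>	/* struct drm_xe_gem_create, DRM_IOCTL_XE_GEM_CREATE */

static int example_gem_create_sysmem(int fd, __u64 size, __u32 sysmem_bit,
				     __u32 *handle_out)
{
	struct drm_xe_gem_create create = {
		.size = size,			/* non-zero, page aligned */
		.placement = sysmem_bit,	/* mask of allowed memory regions */
		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WB, /* WB: sysmem-only */
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -errno;

	*handle_out = create.handle;
	return 0;
}
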
3248 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
3249 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
3250 return -EINVAL; in xe_gem_mmap_offset_ioctl()
3252 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_mmap_offset_ioctl()
3254 return -EINVAL; in xe_gem_mmap_offset_ioctl()
3256 if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) { in xe_gem_mmap_offset_ioctl()
3258 return -EINVAL; in xe_gem_mmap_offset_ioctl()
3260 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_mmap_offset_ioctl()
3261 return -EINVAL; in xe_gem_mmap_offset_ioctl()
3264 return -EINVAL; in xe_gem_mmap_offset_ioctl()
3268 args->offset = XE_PCI_BARRIER_MMAP_OFFSET; in xe_gem_mmap_offset_ioctl()
3272 gem_obj = drm_gem_object_lookup(file, args->handle); in xe_gem_mmap_offset_ioctl()
3274 return -ENOENT; in xe_gem_mmap_offset_ioctl()
3277 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); in xe_gem_mmap_offset_ioctl()
3284 * xe_bo_lock() - Lock the buffer object's dma_resv object
3291 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3298 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL); in xe_bo_lock()
3300 dma_resv_lock(bo->ttm.base.resv, NULL); in xe_bo_lock()
3306 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
3313 dma_resv_unlock(bo->ttm.base.resv); in xe_bo_unlock()
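
The lock/unlock pair above maps directly onto the bo's dma_resv; a short usage sketch with the interruptible variant:

static int example_with_bo_locked(struct xe_bo *bo)
{
	int err = xe_bo_lock(bo, true);	/* interruptible wait for the resv */

	if (err)
		return err;	/* -EINTR: a signal arrived while waiting */

	/* ... inspect or modify the object under its dma_resv lock ... */

	xe_bo_unlock(bo);
	return 0;
}
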
3317 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
3335 if (bo->ttm.type == ttm_bo_type_kernel) in xe_bo_can_migrate()
3338 if (bo->ttm.type == ttm_bo_type_sg) in xe_bo_can_migrate()
3341 for (cur_place = 0; cur_place < bo->placement.num_placement; in xe_bo_can_migrate()
3343 if (bo->placements[cur_place].mem_type == mem_type) in xe_bo_can_migrate()
3353 place->mem_type = mem_type; in xe_place_from_ttm_type()
3357 * xe_bo_migrate - Migrate an object to the desired region id
3372 * return -EINTR or -ERESTARTSYS if signal pending.
3377 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate()
3389 if (bo->ttm.resource->mem_type == mem_type) in xe_bo_migrate()
3393 return -EBUSY; in xe_bo_migrate()
3396 return -EINVAL; in xe_bo_migrate()
3406 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
3411 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
3414 if (!tctx->no_wait_gpu) in xe_bo_migrate()
3415 xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base); in xe_bo_migrate()
3416 return ttm_bo_validate(&bo->ttm, &placement, tctx); in xe_bo_migrate()
3420 * xe_bo_evict - Evict an object to evict placement
3439 xe_evict_flags(&bo->ttm, &placement); in xe_bo_evict()
3440 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx); in xe_bo_evict()
3444 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, in xe_bo_evict()
3451 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
3464 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
3470 * non-VRAM addresses. in xe_bo_needs_ccs_pages()
3472 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
3480 if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB) in xe_bo_needs_ccs_pages()
3487 * __xe_bo_release_dummy() - Dummy kref release function
3488 * @kref: The embedded struct kref.
3497 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
3517 drm_gem_object_free(&bo->ttm.base.refcount); in xe_bo_put_commit()
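
Sketch of the deferred-put pattern that __xe_bo_release_dummy() and xe_bo_put_commit() support: the final reference is queued on a caller-provided llist instead of being freed in place, and the commit call performs the actual frees once it is safe to do so. The xe_bo_put_deferred() helper name and its (bo, llist) parameters are assumptions based on the kernel-doc above.

#include <linux/llist.h>

static void example_put_many(struct xe_bo **bos, unsigned int count)
{
	LLIST_HEAD(deferred);
	unsigned int i;

	for (i = 0; i < count; i++)
		xe_bo_put_deferred(bos[i], &deferred);	/* queues the last ref */

	xe_bo_put_commit(&deferred);	/* frees everything queued above */
}
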
3524 xe_bo_put_commit(&bo_dev->async_list); in xe_bo_dev_work_func()
3528 * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
3533 INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func); in xe_bo_dev_init()
3537 * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
3542 flush_work(&bo_dev->async_free); in xe_bo_dev_fini()
3553 if (bo->client) in xe_bo_put()
3554 might_lock(&bo->client->bos_lock); in xe_bo_put()
3557 if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) in xe_bo_put()
3558 xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt); in xe_bo_put()
3559 drm_gem_object_put(&bo->ttm.base); in xe_bo_put()
3564 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
3580 int cpp = DIV_ROUND_UP(args->bpp, 8); in xe_bo_dumb_create()
3583 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
3585 args->pitch = ALIGN(args->width * cpp, 64); in xe_bo_dumb_create()
3586 args->size = ALIGN(mul_u32_u32(args->pitch, args->height), in xe_bo_dumb_create()
3589 bo = xe_bo_create_user(xe, NULL, args->size, in xe_bo_dumb_create()
3597 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle); in xe_bo_dumb_create()
3598 /* drop reference from allocate - handle holds it now */ in xe_bo_dumb_create()
3599 drm_gem_object_put(&bo->ttm.base); in xe_bo_dumb_create()
3601 args->handle = handle; in xe_bo_dumb_create()
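
Worked example of the pitch/size arithmetic above, assuming a 1920x1080 dumb buffer at 32 bpp: cpp = DIV_ROUND_UP(32, 8) = 4, pitch = ALIGN(1920 * 4, 64) = 7680, and size = ALIGN(7680 * 1080, SZ_4K) = 8294400 bytes; on a platform with XE_VRAM_FLAGS_NEED64K the same buffer rounds up to ALIGN(8294400, SZ_64K) = 8323072 bytes.
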
3607 struct ttm_buffer_object *tbo = &bo->ttm; in xe_bo_runtime_pm_release_mmap_offset()
3608 struct ttm_device *bdev = tbo->bdev; in xe_bo_runtime_pm_release_mmap_offset()
3610 drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping); in xe_bo_runtime_pm_release_mmap_offset()
3612 list_del_init(&bo->vram_userfault_link); in xe_bo_runtime_pm_release_mmap_offset()