Lines matching refs:xe — references to the struct xe_device pointer xe, each tagged with the enclosing function and whether xe is a function argument or a local variable there.

77 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
133 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) in mem_type_to_migrate() argument
137 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); in mem_type_to_migrate()
138 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
144 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region() local
147 xe_assert(xe, resource_is_vram(res)); in res_to_mem_region()
148 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
152 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, in try_add_system() argument
156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
174 static void add_vram(struct xe_device *xe, struct xe_bo *bo, in add_vram() argument
181 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
183 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; in add_vram()
184 xe_assert(xe, vram && vram->usable_size); in add_vram()
202 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, in try_add_vram() argument
206 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
208 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
211 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, in try_add_stolen() argument
215 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
226 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in __xe_bo_placement_for_flags() argument
231 try_add_vram(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
232 try_add_system(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
233 try_add_stolen(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
246 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in xe_bo_placement_for_flags() argument
250 return __xe_bo_placement_for_flags(xe, bo, bo_flags); in xe_bo_placement_for_flags()
348 struct xe_device *xe = xe_bo_device(bo); in xe_ttm_tt_create() local
358 tt->dev = xe->drm.dev; in xe_ttm_tt_create()
362 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), in xe_ttm_tt_create()
371 if (!IS_DGFX(xe)) { in xe_ttm_tt_create()
390 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
400 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
459 struct xe_device *xe = ttm_to_xe_device(bdev); in xe_ttm_io_mem_reserve() local
487 return xe_ttm_stolen_io_mem_reserve(xe, mem); in xe_ttm_io_mem_reserve()
493 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, in xe_bo_trigger_rebind() argument
570 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf() local
573 xe_assert(xe, attach); in xe_bo_move_dmabuf()
574 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
619 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify() local
633 ret = xe_bo_trigger_rebind(xe, bo, ctx); in xe_bo_move_notify()
647 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
650 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
661 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move() local
671 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) && in xe_bo_move()
764 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
766 migrate = mem_type_to_migrate(xe, old_mem_type); in xe_bo_move()
768 migrate = xe->tiles[0].migrate; in xe_bo_move()
770 xe_assert(xe, migrate); in xe_bo_move()
772 if (xe_rpm_reclaim_safe(xe)) { in xe_bo_move()
777 xe_pm_runtime_get(xe); in xe_bo_move()
779 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
780 xe_pm_runtime_get_noresume(xe); in xe_bo_move()
805 xe_pm_runtime_put(xe); in xe_bo_move()
809 xe_assert(xe, new_mem->start == in xe_bo_move()
831 xe_pm_runtime_put(xe); in xe_bo_move()
856 xe_pm_runtime_put(xe); in xe_bo_move()
1027 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor() local
1030 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1041 xe_assert(xe, locked); in xe_ttm_bo_lock_in_destructor()
1115 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_purge() local
1121 drm_WARN_ON(&xe->drm, ret); in xe_ttm_bo_purge()
1146 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_access_memory() local
1153 xe_device_assert_mem_access(xe); in xe_ttm_access_memory()
1173 xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count); in xe_ttm_access_memory()
1175 xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count); in xe_ttm_access_memory()
1206 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy() local
1214 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1216 for_each_tile(tile, xe, id) in xe_ttm_bo_destroy()
1228 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1231 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1273 struct xe_device *xe = to_xe_device(ddev); in xe_gem_fault() local
1280 xe_pm_runtime_get(xe); in xe_gem_fault()
1302 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1304 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1305 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1311 xe_pm_runtime_put(xe); in xe_gem_fault()
1321 struct xe_device *xe = xe_bo_device(bo); in xe_bo_vm_access() local
1324 xe_pm_runtime_get(xe); in xe_bo_vm_access()
1326 xe_pm_runtime_put(xe); in xe_bo_vm_access()
1404 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, in ___xe_bo_create_locked() argument
1421 xe_assert(xe, !tile || type == ttm_bo_type_kernel); in ___xe_bo_create_locked()
1434 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1471 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1479 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1490 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1536 static int __xe_bo_fixed_placement(struct xe_device *xe, in __xe_bo_fixed_placement() argument
1575 __xe_bo_create_locked(struct xe_device *xe, in __xe_bo_create_locked() argument
1593 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size); in __xe_bo_create_locked()
1600 bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL, in __xe_bo_create_locked()
1627 tile = xe_device_get_root_tile(xe); in __xe_bo_create_locked()
1629 xe_assert(xe, tile); in __xe_bo_create_locked()
1632 for_each_tile(t, xe, id) { in __xe_bo_create_locked()
1657 xe_bo_create_locked_range(struct xe_device *xe, in xe_bo_create_locked_range() argument
1662 return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, in xe_bo_create_locked_range()
1666 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_locked() argument
1670 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, in xe_bo_create_locked()
1674 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_user() argument
1679 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, in xe_bo_create_user()
1688 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create() argument
1692 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags); in xe_bo_create()
1700 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map_at() argument
1705 return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, in xe_bo_create_pin_map_at()
1709 struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, in xe_bo_create_pin_map_at_aligned() argument
1722 xe_ttm_stolen_cpu_access_needs_ggtt(xe)) in xe_bo_create_pin_map_at_aligned()
1725 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, in xe_bo_create_pin_map_at_aligned()
1751 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map() argument
1755 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags); in xe_bo_create_pin_map()
1758 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_from_data() argument
1762 struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL, in xe_bo_create_from_data()
1768 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_bo_create_from_data()
1778 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_pin_map() argument
1784 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags); in xe_managed_bo_create_pin_map()
1788 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
1795 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_from_data() argument
1798 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags); in xe_managed_bo_create_from_data()
1803 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
1821 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src) in xe_managed_bo_reinit_in_vram() argument
1828 xe_assert(xe, IS_DGFX(xe)); in xe_managed_bo_reinit_in_vram()
1829 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
1831 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
1836 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
1848 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset() local
1851 return xe_ttm_stolen_gpu_offset(xe); in vram_region_gpu_offset()
1868 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin_external() local
1871 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
1872 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_pin_external()
1880 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
1882 &xe->pinned.external_vram); in xe_bo_pin_external()
1883 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
1901 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin() local
1905 xe_assert(xe, !xe_bo_is_user(bo)); in xe_bo_pin()
1908 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
1915 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
1918 xe_assert(xe, !xe_bo_is_pinned(bo)); in xe_bo_pin()
1929 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && in xe_bo_pin()
1932 xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); in xe_bo_pin()
1941 spin_lock(&xe->pinned.lock); in xe_bo_pin()
1942 list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); in xe_bo_pin()
1943 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
1969 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin_external() local
1971 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
1972 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin_external()
1973 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_unpin_external()
1975 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
1978 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
1992 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin() local
1994 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
1995 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin()
1998 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
1999 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
2001 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
2058 struct xe_device *xe = xe_bo_device(bo); in __xe_bo_addr() local
2062 xe_assert(xe, page_size <= PAGE_SIZE); in __xe_bo_addr()
2067 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
2090 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_vmap() local
2097 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || in xe_bo_vmap()
2141 struct xe_device *xe = to_xe_device(dev); in xe_gem_create_ioctl() local
2150 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_create_ioctl()
2151 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
2152 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
2156 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
2160 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
2166 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
2169 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
2172 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
2175 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2190 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2195 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK))) in xe_gem_create_ioctl()
2201 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2205 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK && in xe_gem_create_ioctl()
2209 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT && in xe_gem_create_ioctl()
2215 if (XE_IOCTL_DBG(xe, !vm)) in xe_gem_create_ioctl()
2222 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2258 struct xe_device *xe = to_xe_device(dev); in xe_gem_mmap_offset_ioctl() local
2262 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2263 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2266 if (XE_IOCTL_DBG(xe, args->flags)) in xe_gem_mmap_offset_ioctl()
2270 if (XE_IOCTL_DBG(xe, !gem_obj)) in xe_gem_mmap_offset_ioctl()
2370 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate() local
2398 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2403 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2450 struct xe_device *xe = xe_bo_device(bo); in xe_bo_needs_ccs_pages() local
2452 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) in xe_bo_needs_ccs_pages()
2455 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2463 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
2535 struct xe_device *xe = to_xe_device(dev); in xe_bo_dumb_create() local
2541 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
2547 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
2549 XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | in xe_bo_dumb_create()
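Below is a minimal, self-contained sketch of the placement-building pattern that the __xe_bo_placement_for_flags(), try_add_vram(), try_add_system() and try_add_stolen() matches above illustrate: each helper conditionally appends a candidate to a fixed-size placements array, with an assert guarding the array bound. All types, flag values and the simplified try_add() helper here are illustrative stand-ins, not the Xe driver's definitions.

/*
 * Simplified userspace sketch (NOT the kernel source): mirrors the pattern of
 * appending placement candidates behind a bounds assert, as seen in the
 * try_add_*() matches above. Types and flag values are invented stand-ins.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BO_FLAG_SYSTEM  (1u << 0)   /* stand-in for a system-memory flag */
#define BO_FLAG_VRAM0   (1u << 1)   /* stand-in for a VRAM flag */
#define BO_FLAG_STOLEN  (1u << 2)   /* stand-in for a stolen-memory flag */

enum mem_type { PL_SYSTEM, PL_VRAM0, PL_STOLEN };

struct placement { enum mem_type mem_type; };

struct bo {
	struct placement placements[3];
	uint32_t flags;
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void try_add(struct bo *bo, uint32_t bo_flags, uint32_t want,
		    enum mem_type mem_type, unsigned int *c)
{
	if (!(bo_flags & want))
		return;

	/* Mirrors xe_assert(xe, *c < ARRAY_SIZE(bo->placements)) in the listing. */
	assert(*c < ARRAY_SIZE(bo->placements));
	bo->placements[(*c)++].mem_type = mem_type;
}

/* Builds the candidate list in priority order: VRAM, then system, then stolen. */
static unsigned int placement_for_flags(struct bo *bo, uint32_t bo_flags)
{
	unsigned int c = 0;

	try_add(bo, bo_flags, BO_FLAG_VRAM0, PL_VRAM0, &c);
	try_add(bo, bo_flags, BO_FLAG_SYSTEM, PL_SYSTEM, &c);
	try_add(bo, bo_flags, BO_FLAG_STOLEN, PL_STOLEN, &c);
	return c;
}

int main(void)
{
	struct bo bo = { .flags = BO_FLAG_VRAM0 | BO_FLAG_SYSTEM };
	unsigned int n = placement_for_flags(&bo, bo.flags);

	printf("%u placement candidates\n", n);   /* prints: 2 placement candidates */
	return 0;
}

The call order in the sketch (VRAM first, then system, then stolen) follows the order of the try_add_vram(), try_add_system() and try_add_stolen() calls shown in the __xe_bo_placement_for_flags() matches above.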