Lines matching refs: xe

(Each entry below is a cross-reference hit: the source line number, the matching code, and the enclosing function; "argument" or "local" notes whether xe is a parameter or a local variable at that line.)

87 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)  in resource_is_stolen_vram()  argument
89 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
159 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) in mem_type_to_migrate() argument
163 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); in mem_type_to_migrate()
164 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; in mem_type_to_migrate()
170 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in res_to_mem_region() local
174 xe_assert(xe, resource_is_vram(res)); in res_to_mem_region()
175 mgr = ttm_manager_type(&xe->ttm, res->mem_type); in res_to_mem_region()
181 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, in try_add_system() argument
185 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_system()
210 static void add_vram(struct xe_device *xe, struct xe_bo *bo, in add_vram() argument
214 struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type); in add_vram()
220 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in add_vram()
223 xe_assert(xe, vram && vram->usable_size); in add_vram()
241 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, in try_add_vram() argument
245 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); in try_add_vram()
247 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); in try_add_vram()
250 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, in try_add_stolen() argument
254 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); in try_add_stolen()
265 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in __xe_bo_placement_for_flags() argument
270 try_add_vram(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
271 try_add_system(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
272 try_add_stolen(xe, bo, bo_flags, &c); in __xe_bo_placement_for_flags()
285 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, in xe_bo_placement_for_flags() argument
289 return __xe_bo_placement_for_flags(xe, bo, bo_flags); in xe_bo_placement_for_flags()
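
The placement helpers above share one accumulation pattern: test a request bit in bo_flags, assert the placements array still has room, and append an entry for that memory type, with __xe_bo_placement_for_flags() trying VRAM first, then system, then stolen. A minimal, self-contained userspace model of that pattern (stand-in types and flag bits; the kernel's struct ttm_place and XE_BO_FLAG_* encodings differ):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in flag bits and placement ids; not the kernel's encodings. */
    #define BO_FLAG_SYSTEM  (1u << 0)
    #define BO_FLAG_VRAM0   (1u << 1)
    #define BO_FLAG_VRAM1   (1u << 2)
    #define BO_FLAG_STOLEN  (1u << 3)

    enum { PL_SYSTEM, PL_VRAM0, PL_VRAM1, PL_STOLEN };

    struct place { int mem_type; };
    #define MAX_PLACEMENTS 4

    /* Append one placement if requested; *c is the running count, and the
     * assert mirrors the xe_assert(xe, *c < ARRAY_SIZE(...)) checks above.
     */
    static void try_add(uint32_t bo_flags, uint32_t flag, int mem_type,
                        struct place *places, unsigned int *c)
    {
        if (!(bo_flags & flag))
            return;
        assert(*c < MAX_PLACEMENTS);
        places[(*c)++].mem_type = mem_type;
    }

    /* VRAM first, then system, then stolen, as in the listing. */
    static int placement_for_flags(uint32_t bo_flags, struct place *places,
                                   unsigned int *c)
    {
        try_add(bo_flags, BO_FLAG_VRAM0, PL_VRAM0, places, c);
        try_add(bo_flags, BO_FLAG_VRAM1, PL_VRAM1, places, c);
        try_add(bo_flags, BO_FLAG_SYSTEM, PL_SYSTEM, places, c);
        try_add(bo_flags, BO_FLAG_STOLEN, PL_STOLEN, places, c);
        return *c ? 0 : -1;     /* nothing valid was requested */
    }

    int main(void)
    {
        struct place places[MAX_PLACEMENTS];
        unsigned int c = 0;

        placement_for_flags(BO_FLAG_VRAM0 | BO_FLAG_SYSTEM, places, &c);
        for (unsigned int i = 0; i < c; i++)
            printf("placement %u: mem_type %d\n", i, places[i].mem_type);
        return 0;
    }
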
295 struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm); in xe_evict_flags() local
296 bool device_unplugged = drm_dev_is_unplugged(&xe->drm); in xe_evict_flags()
347 static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt) in xe_tt_map_sg() argument
362 xe_sg_segment_size(xe->drm.dev), in xe_tt_map_sg()
368 ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, in xe_tt_map_sg()
379 static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt) in xe_tt_unmap_sg() argument
384 dma_unmap_sgtable(xe->drm.dev, xe_tt->sg, in xe_tt_unmap_sg()
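
xe_tt_map_sg() and xe_tt_unmap_sg() are the standard scatter-gather pairing: build an sg_table from the TT's pages under a per-device segment-size cap (xe_sg_segment_size(xe->drm.dev) above), DMA-map it bidirectionally, and tear both down in reverse order. A sketch of that pairing, assuming an already-populated pages[] array; the DMA attrs here are an assumption, since the listing cuts off before them:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Sketch: build and DMA-map an sg_table for num_pages TT pages.
     * max_segment would come from xe_sg_segment_size(); attrs assumed.
     */
    static int tt_map_sg(struct device *dev, struct sg_table *sgt,
                         struct page **pages, unsigned int num_pages,
                         unsigned int max_segment)
    {
        int ret;

        ret = sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
                                                (unsigned long)num_pages << PAGE_SHIFT,
                                                max_segment, GFP_KERNEL);
        if (ret)
            return ret;

        ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL,
                              DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
            sg_free_table(sgt);     /* unwind if the mapping failed */
        return ret;
    }

    /* Exact inverse, in reverse order: unmap, then free the table. */
    static void tt_unmap_sg(struct device *dev, struct sg_table *sgt)
    {
        dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
    }
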
403 static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt) in xe_ttm_tt_account_add() argument
408 xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages); in xe_ttm_tt_account_add()
410 xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0); in xe_ttm_tt_account_add()
413 static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt) in xe_ttm_tt_account_subtract() argument
418 xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages); in xe_ttm_tt_account_subtract()
420 xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0); in xe_ttm_tt_account_subtract()
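
xe_ttm_tt_account_add() and xe_ttm_tt_account_subtract() keep the shrinker's counters in lockstep with TT population by passing signed deltas: the add path credits either the purgeable or the shrinkable count, and the subtract path passes the same magnitude negated, hence the -(long) casts above. A stand-in model (counter names, and which branch is the purgeable one, are assumptions):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the xe shrinker's counters. */
    static atomic_long shrinkable_pages;
    static atomic_long purgeable_pages;

    static void shrinker_mod_pages(long shrinkable_delta, long purgeable_delta)
    {
        atomic_fetch_add(&shrinkable_pages, shrinkable_delta);
        atomic_fetch_add(&purgeable_pages, purgeable_delta);
    }

    static void tt_account_add(unsigned long num_pages, bool purgeable)
    {
        if (purgeable)
            shrinker_mod_pages(0, (long)num_pages);
        else
            shrinker_mod_pages((long)num_pages, 0);
    }

    /* Exact inverse: same magnitudes, negated, as in the -(long) casts. */
    static void tt_account_subtract(unsigned long num_pages, bool purgeable)
    {
        if (purgeable)
            shrinker_mod_pages(0, -(long)num_pages);
        else
            shrinker_mod_pages(-(long)num_pages, 0);
    }

    int main(void)
    {
        tt_account_add(32, false);
        tt_account_subtract(32, false);
        printf("shrinkable: %ld\n", atomic_load(&shrinkable_pages));
        return 0;
    }
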
427 struct xe_device *xe = ttm_to_xe_device(ttm_dev); in update_global_total_pages() local
429 atomic64_add_return(num_pages, &xe->global_total_pages); in update_global_total_pages()
431 trace_gpu_mem_total(xe->drm.primary->index, 0, in update_global_total_pages()
440 struct xe_device *xe = xe_bo_device(bo); in xe_ttm_tt_create() local
455 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)), in xe_ttm_tt_create()
464 if (!IS_DGFX(xe)) { in xe_ttm_tt_create()
483 (xe->info.graphics_verx100 >= 1270 && in xe_ttm_tt_create()
493 xe_assert(xe, bo->cpu_caching == 0); in xe_ttm_tt_create()
550 struct xe_device *xe = ttm_to_xe_device(ttm_dev); in xe_ttm_tt_unpopulate() local
556 xe_tt_unmap_sg(xe, tt); in xe_ttm_tt_unpopulate()
559 xe_ttm_tt_account_subtract(xe, tt); in xe_ttm_tt_unpopulate()
580 struct xe_device *xe = ttm_to_xe_device(bdev); in xe_ttm_io_mem_reserve() local
608 return xe_ttm_stolen_io_mem_reserve(xe, mem); in xe_ttm_io_mem_reserve()
614 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, in xe_bo_trigger_rebind() argument
691 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_dmabuf() local
692 bool device_unplugged = drm_dev_is_unplugged(&xe->drm); in xe_bo_move_dmabuf()
695 xe_assert(xe, attach); in xe_bo_move_dmabuf()
696 xe_assert(xe, ttm_bo->ttm); in xe_bo_move_dmabuf()
749 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move_notify() local
763 ret = xe_bo_trigger_rebind(xe, bo, ctx); in xe_bo_move_notify()
777 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
780 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_bo_move_notify()
791 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_bo_move() local
801 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) && in xe_bo_move()
808 ret = xe_tt_map_sg(xe, ttm); in xe_bo_move()
831 ret = xe_tt_map_sg(xe, ttm); in xe_bo_move()
845 drm_dbg(&xe->drm, "Evict system allocator BO success\n"); in xe_bo_move()
848 drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n", in xe_bo_move()
908 migrate = mem_type_to_migrate(xe, new_mem->mem_type); in xe_bo_move()
910 migrate = mem_type_to_migrate(xe, old_mem_type); in xe_bo_move()
912 migrate = xe->tiles[0].migrate; in xe_bo_move()
914 xe_assert(xe, migrate); in xe_bo_move()
916 if (xe_rpm_reclaim_safe(xe)) { in xe_bo_move()
921 xe_pm_runtime_get(xe); in xe_bo_move()
923 drm_WARN_ON(&xe->drm, handle_system_ccs); in xe_bo_move()
924 xe_pm_runtime_get_noresume(xe); in xe_bo_move()
942 xe_pm_runtime_put(xe); in xe_bo_move()
965 xe_pm_runtime_put(xe); in xe_bo_move()
977 xe_tt_unmap_sg(xe, ttm_bo->ttm); in xe_bo_move()
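
The xe_bo_move() lines above carry its two key decisions: the migration context comes from whichever end of the move is in VRAM (destination preferred, then source, with tile 0 as the system-to-system fallback), and the copy runs under a runtime-PM reference that must not trigger a resume when resuming isn't reclaim-safe. Condensed, with the rationale as comments (the deadlock reasoning is an inference, not quoted from the source):

    /* Condensed from xe_bo_move(): pick the tile whose migrate context
     * performs the copy, preferring the VRAM end of the move.
     */
    if (mem_type_is_vram(new_mem->mem_type))
        migrate = mem_type_to_migrate(xe, new_mem->mem_type);
    else if (mem_type_is_vram(old_mem_type))
        migrate = mem_type_to_migrate(xe, old_mem_type);
    else
        migrate = xe->tiles[0].migrate;

    xe_assert(xe, migrate);

    /* Take a runtime-PM reference for the copy. When a resume is not
     * reclaim-safe, only piggy-back on an already-awake device
     * (inference: resuming from a reclaim context could recurse).
     */
    if (xe_rpm_reclaim_safe(xe))
        xe_pm_runtime_get(xe);
    else
        xe_pm_runtime_get_noresume(xe);
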
987 struct xe_device *xe = ttm_to_xe_device(bo->bdev); in xe_bo_shrink_purge() local
1002 xe_tt_unmap_sg(xe, bo->ttm); in xe_bo_shrink_purge()
1013 xe_ttm_tt_account_subtract(xe, bo->ttm); in xe_bo_shrink_purge()
1063 struct xe_device *xe = ttm_to_xe_device(bo->bdev); in xe_bo_shrink() local
1086 needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && in xe_bo_shrink()
1088 if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) in xe_bo_shrink()
1097 xe_pm_runtime_put(xe); in xe_bo_shrink()
1100 xe_ttm_tt_account_subtract(xe, tt); in xe_bo_shrink()
1120 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_notifier_prepare_pinned() local
1126 xe_assert(xe, !bo->backup_obj); in xe_bo_notifier_prepare_pinned()
1142 backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), in xe_bo_notifier_prepare_pinned()
1195 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_evict_pinned() local
1220 backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, in xe_bo_evict_pinned()
1240 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); in xe_bo_evict_pinned()
1274 xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0, in xe_bo_evict_pinned()
1309 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_restore_pinned() local
1332 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); in xe_bo_restore_pinned()
1367 xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, in xe_bo_restore_pinned()
1436 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_lock_in_destructor() local
1439 xe_assert(xe, !kref_read(&ttm_bo->kref)); in xe_ttm_bo_lock_in_destructor()
1450 xe_assert(xe, locked); in xe_ttm_bo_lock_in_destructor()
1524 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_purge() local
1530 drm_WARN_ON(&xe->drm, ret); in xe_ttm_bo_purge()
1555 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_access_memory() local
1563 xe_device_assert_mem_access(xe); in xe_ttm_access_memory()
1570 mem_type_to_migrate(xe, ttm_bo->resource->mem_type); in xe_ttm_access_memory()
1588 xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count); in xe_ttm_access_memory()
1590 xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count); in xe_ttm_access_memory()
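
In xe_ttm_access_memory(), the back-to-back xe_map_memcpy_to()/xe_map_memcpy_from() hits are the two arms of one branch: once the BO's VRAM is resolved into a CPU-accessible map, TTM's write flag picks the copy direction. Restored to its branch form:

    /* `write` selects whether the caller's buffer is source or destination. */
    if (write)
        xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
    else
        xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
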
1622 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); in xe_ttm_bo_destroy() local
1630 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); in xe_ttm_bo_destroy()
1632 for_each_tile(tile, xe, id) in xe_ttm_bo_destroy()
1647 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1650 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_ttm_bo_destroy()
1692 struct xe_device *xe = to_xe_device(ddev); in xe_gem_fault() local
1699 xe_pm_runtime_get(xe); in xe_gem_fault()
1721 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1723 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list); in xe_gem_fault()
1724 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_gem_fault()
1730 xe_pm_runtime_put(xe); in xe_gem_fault()
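
xe_gem_fault() shows a pattern repeated throughout the file: bracket the whole operation with xe_pm_runtime_get()/xe_pm_runtime_put(), and for VRAM-backed BOs record the object on the device-wide vram_userfault list under its dedicated mutex so CPU mappings can be invalidated later (xe_bo_move_notify() and xe_ttm_bo_destroy() above take the same lock to unlink). Condensed; the double-insertion guard is an assumption about the unshown lines:

    /* Keep the device awake across the fault, and remember VRAM-backed
     * BOs whose mappings may need zapping (e.g. around runtime suspend).
     */
    xe_pm_runtime_get(xe);

    /* ... TTM fault handling ... */

    mutex_lock(&xe->mem_access.vram_userfault.lock);
    if (list_empty(&bo->vram_userfault_link))   /* guard is assumed */
        list_add(&bo->vram_userfault_link,
                 &xe->mem_access.vram_userfault.list);
    mutex_unlock(&xe->mem_access.vram_userfault.lock);

    xe_pm_runtime_put(xe);
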
1740 struct xe_device *xe = xe_bo_device(bo); in xe_bo_vm_access() local
1743 xe_pm_runtime_get(xe); in xe_bo_vm_access()
1745 xe_pm_runtime_put(xe); in xe_bo_vm_access()
1823 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, in ___xe_bo_create_locked() argument
1840 xe_assert(xe, !tile || type == ttm_bo_type_kernel); in ___xe_bo_create_locked()
1853 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || in ___xe_bo_create_locked()
1889 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); in ___xe_bo_create_locked()
1897 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); in ___xe_bo_create_locked()
1908 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, in ___xe_bo_create_locked()
1954 static int __xe_bo_fixed_placement(struct xe_device *xe, in __xe_bo_fixed_placement() argument
1993 __xe_bo_create_locked(struct xe_device *xe, in __xe_bo_create_locked() argument
2011 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size); in __xe_bo_create_locked()
2018 bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL, in __xe_bo_create_locked()
2045 tile = xe_device_get_root_tile(xe); in __xe_bo_create_locked()
2047 xe_assert(xe, tile); in __xe_bo_create_locked()
2050 for_each_tile(t, xe, id) { in __xe_bo_create_locked()
2076 xe_bo_create_locked_range(struct xe_device *xe, in xe_bo_create_locked_range() argument
2081 return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, in xe_bo_create_locked_range()
2085 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_locked() argument
2089 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, in xe_bo_create_locked()
2093 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_user() argument
2098 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, in xe_bo_create_user()
2107 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create() argument
2111 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags); in xe_bo_create()
2119 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map_at() argument
2124 return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, in xe_bo_create_pin_map_at()
2128 struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, in xe_bo_create_pin_map_at_aligned() argument
2141 xe_ttm_stolen_cpu_access_needs_ggtt(xe)) in xe_bo_create_pin_map_at_aligned()
2144 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, in xe_bo_create_pin_map_at_aligned()
2170 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_bo_create_pin_map() argument
2174 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags); in xe_bo_create_pin_map()
2182 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_pin_map() argument
2188 KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags); in xe_managed_bo_create_pin_map()
2190 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags); in xe_managed_bo_create_pin_map()
2194 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); in xe_managed_bo_create_pin_map()
2201 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, in xe_managed_bo_create_from_data() argument
2204 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags); in xe_managed_bo_create_from_data()
2209 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
2227 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src) in xe_managed_bo_reinit_in_vram() argument
2235 xe_assert(xe, IS_DGFX(xe)); in xe_managed_bo_reinit_in_vram()
2236 xe_assert(xe, !(*src)->vmap.is_iomem); in xe_managed_bo_reinit_in_vram()
2238 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, in xe_managed_bo_reinit_in_vram()
2243 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); in xe_managed_bo_reinit_in_vram()
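
The xe_managed_bo_* helpers tie a pinned, vmapped kernel BO to device lifetime: create and pin-map it, optionally copy initial data in, then register __xe_bo_unpin_map_no_vm with devm so teardown happens automatically on device removal (xe_managed_bo_reinit_in_vram() releases that action explicitly when replacing the source BO). Condensed from the lines above, error handling abbreviated:

    bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
                              flags);
    if (IS_ERR(bo))
        return bo;

    if (data)   /* the xe_managed_bo_create_from_data() path */
        xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);

    /* Unpin and free automatically when the device goes away. */
    ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo);
    if (ret)
        return ERR_PTR(ret);
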
2255 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); in vram_region_gpu_offset() local
2259 return xe_ttm_stolen_gpu_offset(xe); in vram_region_gpu_offset()
2281 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin_external() local
2284 xe_assert(xe, !bo->vm); in xe_bo_pin_external()
2285 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_pin_external()
2292 spin_lock(&xe->pinned.lock); in xe_bo_pin_external()
2293 list_add_tail(&bo->pinned_link, &xe->pinned.late.external); in xe_bo_pin_external()
2294 spin_unlock(&xe->pinned.lock); in xe_bo_pin_external()
2299 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); in xe_bo_pin_external()
2313 struct xe_device *xe = xe_bo_device(bo); in xe_bo_pin() local
2317 xe_assert(xe, !xe_bo_is_user(bo)); in xe_bo_pin()
2320 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | in xe_bo_pin()
2327 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_pin()
2330 xe_assert(xe, !xe_bo_is_pinned(bo)); in xe_bo_pin()
2337 spin_lock(&xe->pinned.lock); in xe_bo_pin()
2339 list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present); in xe_bo_pin()
2341 list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present); in xe_bo_pin()
2342 spin_unlock(&xe->pinned.lock); in xe_bo_pin()
2347 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); in xe_bo_pin()
2370 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin_external() local
2372 xe_assert(xe, !bo->vm); in xe_bo_unpin_external()
2373 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin_external()
2374 xe_assert(xe, xe_bo_is_user(bo)); in xe_bo_unpin_external()
2376 spin_lock(&xe->pinned.lock); in xe_bo_unpin_external()
2379 spin_unlock(&xe->pinned.lock); in xe_bo_unpin_external()
2383 xe_ttm_tt_account_add(xe, bo->ttm.ttm); in xe_bo_unpin_external()
2395 struct xe_device *xe = xe_bo_device(bo); in xe_bo_unpin() local
2397 xe_assert(xe, !bo->ttm.base.import_attach); in xe_bo_unpin()
2398 xe_assert(xe, xe_bo_is_pinned(bo)); in xe_bo_unpin()
2401 spin_lock(&xe->pinned.lock); in xe_bo_unpin()
2402 xe_assert(xe, !list_empty(&bo->pinned_link)); in xe_bo_unpin()
2404 spin_unlock(&xe->pinned.lock); in xe_bo_unpin()
2415 xe_ttm_tt_account_add(xe, bo->ttm.ttm); in xe_bo_unpin()
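
Pin and unpin are symmetric bookkeeping around xe->pinned.lock: pinning appends the BO to the appropriate pinned list and removes its pages from the shrinker's accounting (xe_ttm_tt_account_subtract()), while unpinning asserts the link is still live, drops it, and re-adds the pages. Condensed; the list_del_init() and the ttm guard are assumptions about the unshown lines:

    /* Pin: track the BO and stop counting its pages as shrinkable. */
    spin_lock(&xe->pinned.lock);
    list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
    spin_unlock(&xe->pinned.lock);
    if (bo->ttm.ttm)
        xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);

    /* Unpin: the exact inverse. */
    spin_lock(&xe->pinned.lock);
    xe_assert(xe, !list_empty(&bo->pinned_link));
    list_del_init(&bo->pinned_link);
    spin_unlock(&xe->pinned.lock);
    if (bo->ttm.ttm)
        xe_ttm_tt_account_add(xe, bo->ttm.ttm);
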
2476 struct xe_device *xe = xe_bo_device(bo); in __xe_bo_addr() local
2480 xe_assert(xe, page_size <= PAGE_SIZE); in __xe_bo_addr()
2485 xe_assert(xe, bo->ttm.ttm); in __xe_bo_addr()
2508 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_vmap() local
2515 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || in xe_bo_vmap()
2556 static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value) in gem_create_set_pxp_type() argument
2562 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM)) in gem_create_set_pxp_type()
2565 return xe_pxp_key_assign(xe->pxp, bo); in gem_create_set_pxp_type()
2568 typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
2576 static int gem_create_user_ext_set_property(struct xe_device *xe, in gem_create_user_ext_set_property() argument
2586 if (XE_IOCTL_DBG(xe, err)) in gem_create_user_ext_set_property()
2589 if (XE_IOCTL_DBG(xe, ext.property >= in gem_create_user_ext_set_property()
2591 XE_IOCTL_DBG(xe, ext.pad) || in gem_create_user_ext_set_property()
2592 XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY)) in gem_create_user_ext_set_property()
2599 return gem_create_set_property_funcs[idx](xe, bo, ext.value); in gem_create_user_ext_set_property()
2602 typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
2611 static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo, in gem_create_user_extensions() argument
2619 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) in gem_create_user_extensions()
2623 if (XE_IOCTL_DBG(xe, err)) in gem_create_user_extensions()
2626 if (XE_IOCTL_DBG(xe, ext.pad) || in gem_create_user_extensions()
2627 XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs))) in gem_create_user_extensions()
2632 err = gem_create_user_extension_funcs[idx](xe, bo, extensions); in gem_create_user_extensions()
2633 if (XE_IOCTL_DBG(xe, err)) in gem_create_user_extensions()
2637 return gem_create_user_extensions(xe, bo, ext.next_extension, in gem_create_user_extensions()
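
gem_create_user_extensions() walks a user-space linked list: copy one extension header in, validate pad and name, dispatch through a function table, then recurse on next_extension with a depth counter capped at MAX_USER_EXTENSIONS so a looping chain can't run forever. A self-contained model of that walk, with in-memory nodes standing in for copy_from_user() and hypothetical names throughout:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_USER_EXTENSIONS 16      /* illustrative cap */

    /* In-memory stand-in for the user-space extension header. */
    struct user_extension {
        const struct user_extension *next;  /* next_extension */
        uint32_t name;                      /* dispatch-table index */
        uint32_t pad;                       /* must be zero */
    };

    typedef int (*extension_fn)(uint64_t value);

    static int set_pxp_type(uint64_t value)
    {
        printf("set_pxp_type(%llu)\n", (unsigned long long)value);
        return 0;
    }

    static const extension_fn funcs[] = { set_pxp_type };

    /* Validate each node, dispatch it, then recurse with a bounded depth. */
    static int walk_extensions(const struct user_extension *ext, int depth)
    {
        if (!ext)
            return 0;
        if (depth >= MAX_USER_EXTENSIONS || ext->pad ||
            ext->name >= sizeof(funcs) / sizeof(funcs[0]))
            return -22;                     /* -EINVAL */

        int err = funcs[ext->name](0);      /* value elided in this model */
        if (err)
            return err;
        return walk_extensions(ext->next, depth + 1);
    }

    int main(void)
    {
        struct user_extension e = { .next = 0, .name = 0, .pad = 0 };
        return walk_extensions(&e, 0);
    }
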
2646 struct xe_device *xe = to_xe_device(dev); in xe_gem_create_ioctl() local
2656 if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || in xe_gem_create_ioctl()
2657 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_create_ioctl()
2661 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || in xe_gem_create_ioctl()
2665 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_create_ioctl()
2671 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_create_ioctl()
2674 if (XE_IOCTL_DBG(xe, !args->size)) in xe_gem_create_ioctl()
2677 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) in xe_gem_create_ioctl()
2680 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) in xe_gem_create_ioctl()
2695 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && in xe_gem_create_ioctl()
2700 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK))) in xe_gem_create_ioctl()
2706 if (XE_IOCTL_DBG(xe, !args->cpu_caching || in xe_gem_create_ioctl()
2710 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK && in xe_gem_create_ioctl()
2714 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT && in xe_gem_create_ioctl()
2720 if (XE_IOCTL_DBG(xe, !vm)) in xe_gem_create_ioctl()
2731 bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching, in xe_gem_create_ioctl()
2745 err = gem_create_user_extensions(xe, bo, args->extensions, 0); in xe_gem_create_ioctl()
2775 struct xe_device *xe = to_xe_device(dev); in xe_gem_mmap_offset_ioctl() local
2779 if (XE_IOCTL_DBG(xe, args->extensions) || in xe_gem_mmap_offset_ioctl()
2780 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_gem_mmap_offset_ioctl()
2783 if (XE_IOCTL_DBG(xe, args->flags & in xe_gem_mmap_offset_ioctl()
2788 if (XE_IOCTL_DBG(xe, !IS_DGFX(xe))) in xe_gem_mmap_offset_ioctl()
2791 if (XE_IOCTL_DBG(xe, args->handle)) in xe_gem_mmap_offset_ioctl()
2794 if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K)) in xe_gem_mmap_offset_ioctl()
2804 if (XE_IOCTL_DBG(xe, !gem_obj)) in xe_gem_mmap_offset_ioctl()
2904 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); in xe_bo_migrate() local
2932 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); in xe_bo_migrate()
2937 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); in xe_bo_migrate()
2982 struct xe_device *xe = xe_bo_device(bo); in xe_bo_needs_ccs_pages() local
2984 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) in xe_bo_needs_ccs_pages()
2987 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) in xe_bo_needs_ccs_pages()
2995 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) in xe_bo_needs_ccs_pages()
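
xe_bo_needs_ccs_pages() is a chain of early-outs: Xe2-and-later discrete parts never need extra CCS backing, nor do devices without flat CCS or BOs that aren't ttm_bo_type_device, and on discrete parts a BO that may live in system memory skips CCS pages too. Condensed from the lines above (any further checks in the function are elided, and the final return is assumed):

    if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
        return false;

    if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
        return false;

    /* On dGPU, a system-memory-capable BO carries no CCS backing. */
    if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
        return false;

    return true;
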
3100 struct xe_device *xe = to_xe_device(dev); in xe_bo_dumb_create() local
3106 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); in xe_bo_dumb_create()
3112 bo = xe_bo_create_user(xe, NULL, NULL, args->size, in xe_bo_dumb_create()
3114 XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | in xe_bo_dumb_create()