
Searched +full:0 +full:xe (Results 1 – 25 of 1019) sorted by relevance


/linux/drivers/gpu/drm/xe/
xe_pm.c
33 * DOC: Xe Power Management
35 * Xe PM implements the main routines for both system level suspend states and
55 * to perform the transition from D3hot to D3cold. Xe may disallow this
63 * (PC-states), and/or other low level power states. Xe PM component provides
67 * Also, Xe PM provides get and put functions that Xe driver will use to
94 lock_acquire_shared_recursive(&xe_pm_block_lockdep_map, 0, 1, NULL, _RET_IP_); in xe_pm_block_begin_signalling()
116 * @xe: The xe device about to be suspended.
128 * Return: %0 on success, %-ERESTARTSYS on signal pending or
131 int xe_pm_block_on_suspend(struct xe_device *xe) in xe_pm_block_on_suspend() argument
135 return wait_for_completion_interruptible(&xe->pm_block); in xe_pm_block_on_suspend()
[all …]
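
A minimal caller sketch for the xe_pm_block_on_suspend() hit above, based only on the signature and return semantics shown in the excerpt; the surrounding ioctl context and the function name used here are assumptions:

/* Hypothetical caller: block until an in-progress suspend transition clears. */
static int example_wait_for_pm_unblock(struct xe_device *xe)
{
	int err;

	/* Per the excerpt: returns 0 on success, -ERESTARTSYS if a signal is pending. */
	err = xe_pm_block_on_suspend(xe);
	if (err)
		return err;	/* let the caller restart the syscall */

	return 0;
}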
xe_device.c
82 struct xe_device *xe = to_xe_device(dev); in xe_file_open() local
100 xef->xe = xe; in xe_file_open()
118 return 0; in xe_file_open()
136 * xe_file_get() - Take a reference to the xe file object
137 * @xef: Pointer to the xe file
139 * Anyone with a pointer to xef must take a reference to the xe file
142 * Return: xe file pointer
151 * xe_file_put() - Drop a reference to the xe file object
152 * @xef: Pointer to the xe file
163 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
[all …]
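
The xe_file_get()/xe_file_put() kernel-doc above implies a simple reference pattern; a sketch assuming the usual get/put signatures, which the excerpt does not show in full:

/* Hypothetical holder of an xe file pointer. */
static void example_use_xef(struct xe_file *xef)
{
	/* Anyone keeping a pointer to xef must take a reference first. */
	xef = xe_file_get(xef);

	/* ... use xef while the reference is held ... */

	xe_file_put(xef);	/* drop the reference when done */
}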
xe_sriov_pf_migration.c
19 static struct xe_sriov_migration_state *pf_pick_migration(struct xe_device *xe, unsigned int vfid) in pf_pick_migration() argument
21 xe_assert(xe, IS_SRIOV_PF(xe)); in pf_pick_migration()
22 xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe)); in pf_pick_migration()
24 return &xe->sriov.pf.vfs[vfid].migration; in pf_pick_migration()
29 * @xe: the &xe_device
34 wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid) in xe_sriov_pf_migration_waitqueue() argument
36 return &pf_pick_migration(xe, vfid)->wq; in xe_sriov_pf_migration_waitqueue()
41 * @xe: the &xe_device
45 bool xe_sriov_pf_migration_supported(struct xe_device *xe) in xe_sriov_pf_migration_supported() argument
47 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_migration_supported()
[all …]
xe_bo_evict.c
15 static int xe_bo_apply_to_pinned(struct xe_device *xe, in xe_bo_apply_to_pinned() argument
22 int ret = 0; in xe_bo_apply_to_pinned()
24 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
32 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
36 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
44 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
47 spin_lock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
50 spin_unlock(&xe->pinned.lock); in xe_bo_apply_to_pinned()
58 * @xe: xe device
62 * Return: 0 on success, negative error code on error.
[all …]
xe_pci.c
193 static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
213 static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
265 static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
266 static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
267 static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
352 static const u16 bmg_g21_ids[] = { INTEL_BMG_G21_IDS(NOP), 0 };
460 if (negative && strcmp(devices, "!*") == 0) in device_id_in_list()
462 if (!negative && strcmp(devices, "*") == 0) in device_id_in_list()
472 if (negative && tok[0] == '!') in device_id_in_list()
474 else if ((negative && tok[0] != '!') || in device_id_in_list()
[all …]
xe_sriov_pf_service.c
18 * @xe: the &xe_device to initialize
24 void xe_sriov_pf_service_init(struct xe_device *xe) in xe_sriov_pf_service_init() argument
29 xe_assert(xe, IS_SRIOV_PF(xe)); in xe_sriov_pf_service_init()
32 xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR; in xe_sriov_pf_service_init()
33 xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR; in xe_sriov_pf_service_init()
36 xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR; in xe_sriov_pf_service_init()
37 xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR; in xe_sriov_pf_service_init()
40 /* Return: 0 on success or a negative error code on failure. */
41 static int pf_negotiate_version(struct xe_device *xe, in pf_negotiate_version() argument
45 struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base; in pf_negotiate_version()
[all …]
xe_survivability_mode.c
45 * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
69 * (:ref:`xe-device-wedging`) requiring a firmware flash to restore normal operation.
95 static void populate_survivability_info(struct xe_device *xe) in populate_survivability_info() argument
97 struct xe_survivability *survivability = &xe->survivability; in populate_survivability_info()
100 u32 id = 0, reg_value; in populate_survivability_info()
104 mmio = xe_root_tile_mmio(xe); in populate_survivability_info()
121 for (index = 0; id && reg_value; index++, reg_value = info[id].value, in populate_survivability_info()
131 struct xe_device *xe = pdev_to_xe_device(pdev); in log_survivability_info() local
132 struct xe_survivability *survivability = &xe->survivability; in log_survivability_info()
138 for (id = 0; id < MAX_SCRATCH_MMIO; id++) { in log_survivability_info()
[all …]
xe_nvm.c
16 #define GEN12_GUNIT_NVM_BASE 0x00102040
17 #define GEN12_DEBUG_NVM_BASE 0x00101018
19 #define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C
21 #define GEN12_GUNIT_NVM_SIZE 0x80
22 #define GEN12_DEBUG_NVM_SIZE 0x4
29 [0] = { .name = "DESCRIPTOR", },
44 static bool xe_nvm_non_posted_erase(struct xe_device *xe) in xe_nvm_non_posted_erase() argument
46 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xe_nvm_non_posted_erase()
48 if (xe->info.platform != XE_BATTLEMAGE) in xe_nvm_non_posted_erase()
54 static bool xe_nvm_writable_override(struct xe_device *xe) in xe_nvm_writable_override() argument
[all …]
xe_bo.c
53 .fpfn = 0,
54 .lpfn = 0,
56 .flags = 0,
68 .fpfn = 0,
69 .lpfn = 0,
74 .fpfn = 0,
75 .lpfn = 0,
87 for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
95 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) in resource_is_stolen_vram() argument
97 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); in resource_is_stolen_vram()
167 mem_type_to_migrate(struct xe_device * xe,u32 mem_type) mem_type_to_migrate() argument
178 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); res_to_mem_region() local
189 try_add_system(struct xe_device * xe,struct xe_bo * bo,u32 bo_flags,u32 * c) try_add_system() argument
222 vram_bo_flag_to_tile_id(struct xe_device * xe,u32 vram_bo_flag) vram_bo_flag_to_tile_id() argument
230 bo_vram_flags_to_vram_placement(struct xe_device * xe,u32 bo_flags,u32 vram_flag,enum ttm_bo_type type) bo_vram_flags_to_vram_placement() argument
243 add_vram(struct xe_device * xe,struct xe_bo * bo,struct ttm_place * places,u32 bo_flags,u32 mem_type,u32 * c) add_vram() argument
274 try_add_vram(struct xe_device * xe,struct xe_bo * bo,u32 bo_flags,enum ttm_bo_type type,u32 * c) try_add_vram() argument
286 try_add_stolen(struct xe_device * xe,struct xe_bo * bo,u32 bo_flags,u32 * c) try_add_stolen() argument
301 __xe_bo_placement_for_flags(struct xe_device * xe,struct xe_bo * bo,u32 bo_flags,enum ttm_bo_type type) __xe_bo_placement_for_flags() argument
321 xe_bo_placement_for_flags(struct xe_device * xe,struct xe_bo * bo,u32 bo_flags,enum ttm_bo_type type) xe_bo_placement_for_flags() argument
331 struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm); xe_evict_flags() local
383 xe_tt_map_sg(struct xe_device * xe,struct ttm_tt * tt) xe_tt_map_sg() argument
415 xe_tt_unmap_sg(struct xe_device * xe,struct ttm_tt * tt) xe_tt_unmap_sg() argument
439 xe_ttm_tt_account_add(struct xe_device * xe,struct ttm_tt * tt) xe_ttm_tt_account_add() argument
449 xe_ttm_tt_account_subtract(struct xe_device * xe,struct ttm_tt * tt) xe_ttm_tt_account_subtract() argument
463 struct xe_device *xe = ttm_to_xe_device(ttm_dev); update_global_total_pages() local
476 struct xe_device *xe = xe_bo_device(bo); xe_ttm_tt_create() local
586 struct xe_device *xe = ttm_to_xe_device(ttm_dev); xe_ttm_tt_unpopulate() local
633 struct xe_device *xe = ttm_to_xe_device(bdev); xe_ttm_io_mem_reserve() local
667 xe_bo_trigger_rebind(struct xe_device * xe,struct xe_bo * bo,const struct ttm_operation_ctx * ctx) xe_bo_trigger_rebind() argument
744 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_bo_move_dmabuf() local
802 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_bo_move_notify() local
844 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_bo_move() local
1057 struct xe_device *xe = ttm_to_xe_device(bo->bdev); xe_bo_shrink_purge() local
1133 struct xe_device *xe = ttm_to_xe_device(bo->bdev); xe_bo_shrink() local
1190 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); xe_bo_notifier_prepare_pinned() local
1259 struct xe_device *xe = xe_bo_device(bo); xe_bo_evict_pinned_copy() local
1327 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); xe_bo_evict_pinned() local
1397 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); xe_bo_restore_pinned() local
1518 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_ttm_bo_lock_in_destructor() local
1604 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_ttm_bo_purge() local
1635 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_ttm_access_memory() local
1702 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); xe_ttm_bo_destroy() local
1818 __xe_bo_cpu_fault(struct vm_fault * vmf,struct xe_device * xe,struct xe_bo * bo) __xe_bo_cpu_fault() argument
1870 xe_bo_cpu_fault_fastpath(struct vm_fault * vmf,struct xe_device * xe,struct xe_bo * bo,bool needs_rpm) xe_bo_cpu_fault_fastpath() argument
1930 struct xe_device *xe = to_xe_device(ddev); xe_bo_cpu_fault() local
2028 struct xe_device *xe = xe_bo_device(bo); xe_bo_vm_access() local
2132 xe_bo_init_locked(struct xe_device * xe,struct xe_bo * bo,struct xe_tile * tile,struct dma_resv * resv,struct ttm_lru_bulk_move * bulk,size_t size,u16 cpu_caching,enum ttm_bo_type type,u32 flags,struct drm_exec * exec) xe_bo_init_locked() argument
2264 __xe_bo_fixed_placement(struct xe_device * xe,struct xe_bo * bo,enum ttm_bo_type type,u32 flags,u64 start,u64 end,u64 size) __xe_bo_fixed_placement() argument
2306 __xe_bo_create_locked(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,u64 start,u64 end,u16 cpu_caching,enum ttm_bo_type type,u32 flags,u64 alignment,struct drm_exec * exec) __xe_bo_create_locked() argument
2404 xe_bo_create_locked(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,enum ttm_bo_type type,u32 flags,struct drm_exec * exec) xe_bo_create_locked() argument
2413 xe_bo_create_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,u16 cpu_caching,enum ttm_bo_type type,u32 flags,u64 alignment,bool intr) xe_bo_create_novm() argument
2453 xe_bo_create_user(struct xe_device * xe,struct xe_vm * vm,size_t size,u16 cpu_caching,u32 flags,struct drm_exec * exec) xe_bo_create_user() argument
2494 xe_bo_create_pin_range_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,u64 start,u64 end,enum ttm_bo_type type,u32 flags) xe_bo_create_pin_range_novm() argument
2526 xe_bo_create_pin_map_at_aligned(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,u64 offset,enum ttm_bo_type type,u32 flags,u64 alignment,struct drm_exec * exec) xe_bo_create_pin_map_at_aligned() argument
2588 xe_bo_create_pin_map_at_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,u64 offset,enum ttm_bo_type type,u32 flags,u64 alignment,bool intr) xe_bo_create_pin_map_at_novm() argument
2631 xe_bo_create_pin_map(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,enum ttm_bo_type type,u32 flags,struct drm_exec * exec) xe_bo_create_pin_map() argument
2657 xe_bo_create_pin_map_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,enum ttm_bo_type type,u32 flags,bool intr) xe_bo_create_pin_map_novm() argument
2669 xe_managed_bo_create_pin_map(struct xe_device * xe,struct xe_tile * tile,size_t size,u32 flags) xe_managed_bo_create_pin_map() argument
2692 xe_managed_bo_create_from_data(struct xe_device * xe,struct xe_tile * tile,const void * data,size_t size,u32 flags) xe_managed_bo_create_from_data() argument
2718 xe_managed_bo_reinit_in_vram(struct xe_device * xe,struct xe_tile * tile,struct xe_bo ** src) xe_managed_bo_reinit_in_vram() argument
2746 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); vram_region_gpu_offset() local
2774 struct xe_device *xe = xe_bo_device(bo); xe_bo_pin_external() local
2818 struct xe_device *xe = xe_bo_device(bo); xe_bo_pin() local
2875 struct xe_device *xe = xe_bo_device(bo); xe_bo_unpin_external() local
2900 struct xe_device *xe = xe_bo_device(bo); xe_bo_unpin() local
2986 struct xe_device *xe = xe_bo_device(bo); __xe_bo_addr() local
3018 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); xe_bo_vmap() local
3066 gem_create_set_pxp_type(struct xe_device * xe,struct xe_bo * bo,u64 value) gem_create_set_pxp_type() argument
3086 gem_create_user_ext_set_property(struct xe_device * xe,struct xe_bo * bo,u64 extension) gem_create_user_ext_set_property() argument
3121 gem_create_user_extensions(struct xe_device * xe,struct xe_bo * bo,u64 extensions,int ext_number) gem_create_user_extensions() argument
3156 struct xe_device *xe = to_xe_device(dev); xe_gem_create_ioctl() local
3287 struct xe_device *xe = to_xe_device(dev); xe_gem_mmap_offset_ioctl() local
3420 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); xe_bo_migrate() local
3502 struct xe_device *xe = xe_bo_device(bo); xe_bo_needs_ccs_pages() local
3620 struct xe_device *xe = to_xe_device(dev); xe_bo_dumb_create() local
[all...]
xe_heci_gsc.c
20 #define GSC_BAR_LENGTH 0x00000FFC
103 if (heci_gsc->irq >= 0) in xe_heci_gsc_fini()
109 static int heci_gsc_irq_setup(struct xe_device *xe) in heci_gsc_irq_setup() argument
111 struct xe_heci_gsc *heci_gsc = &xe->heci_gsc; in heci_gsc_irq_setup()
114 heci_gsc->irq = irq_alloc_desc(0); in heci_gsc_irq_setup()
115 if (heci_gsc->irq < 0) { in heci_gsc_irq_setup()
116 drm_err(&xe->drm, "gsc irq error %d\n", heci_gsc->irq); in heci_gsc_irq_setup()
121 if (ret < 0) in heci_gsc_irq_setup()
122 drm_err(&xe->drm, "gsc irq init failed %d\n", ret); in heci_gsc_irq_setup()
127 static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *def) in heci_gsc_add_device() argument
[all …]
xe_late_bind_fw.c
58 struct xe_device *xe = late_bind_fw_to_xe(lb_fw); in parse_cpd_header() local
63 u32 offset = 0; in parse_cpd_header()
67 xe_assert(xe, manifest_entry); in parse_cpd_header()
73 drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n", in parse_cpd_header()
80 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
87 for (i = 0; i < header->num_of_entries; i++, entry++) in parse_cpd_header()
88 if (strcmp(entry->name, manifest_entry) == 0) in parse_cpd_header()
92 drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n", in parse_cpd_header()
99 drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", in parse_cpd_header()
108 return 0; in parse_cpd_header()
[all …]
xe_vram.c
28 static void resize_bar(struct xe_device *xe, int resno, resource_size_t size) in resize_bar() argument
30 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in resize_bar()
34 ret = pci_resize_resource(pdev, resno, bar_size, 0); in resize_bar()
36 …drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support… in resize_bar()
41 drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); in resize_bar()
48 void xe_vram_resize_bar(struct xe_device *xe) in xe_vram_resize_bar() argument
51 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_vram_resize_bar()
62 if (force_vram_bar_size < 0) in xe_vram_resize_bar()
71 drm_info(&xe->drm, in xe_vram_resize_bar()
72 "Requested size: %lluMiB is not supported by rebar sizes: 0x%llx. Leaving default: %lluMiB\n", in xe_vram_resize_bar()
[all …]
xe_shrinker.c
19 * @xe: Back pointer to the device.
27 struct xe_device *xe; member
57 static s64 __xe_shrinker_walk(struct xe_device *xe, in __xe_shrinker_walk() argument
63 s64 freed = 0, lret; in __xe_shrinker_walk()
66 struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); in __xe_shrinker_walk()
82 if (lret < 0) in __xe_shrinker_walk()
90 xe_assert(xe, !IS_ERR(ttm_bo)); in __xe_shrinker_walk()
102 static s64 xe_shrinker_walk(struct xe_device *xe, in xe_shrinker_walk() argument
113 lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); in xe_shrinker_walk()
115 if (lret < 0 || *scanned >= to_scan) in xe_shrinker_walk()
[all …]
xe_step.c
41 [0] = { COMMON_STEP(A0) },
46 [0] = { COMMON_STEP(A0) },
51 [0x0] = { COMMON_STEP(A0) },
52 [0x1] = { COMMON_STEP(A0) },
53 [0x4] = { COMMON_STEP(B0) },
54 [0x8] = { COMMON_STEP(C0) },
55 [0xC] = { COMMON_STEP(D0) },
59 [0x4] = { COMMON_STEP(D0) },
60 [0xC] = { COMMON_STEP(D0) },
64 [0x0] = { COMMON_STEP(A0) },
[all …]
xe_exec_queue.c
57 XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
63 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
70 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) in __xe_exec_queue_free()
85 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
90 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) { in alloc_dep_schedulers()
106 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
115 return 0; in alloc_dep_schedulers()
118 static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, in __xe_exec_queue_alloc() argument
163 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
178 err = exec_queue_user_extensions(xe, in __xe_exec_queue_alloc()
257 xe_exec_queue_create(struct xe_device * xe,struct xe_vm * vm,u32 logical_mask,u16 width,struct xe_hw_engine * hwe,u32 flags,u64 extensions) xe_exec_queue_create() argument
300 xe_exec_queue_create_class(struct xe_device * xe,struct xe_gt * gt,struct xe_vm * vm,enum xe_engine_class class,u32 flags,u64 extensions) xe_exec_queue_create_class() argument
342 xe_exec_queue_create_bind(struct xe_device * xe,struct xe_tile * tile,u32 flags,u64 extensions) xe_exec_queue_create_bind() argument
467 xe_exec_queue_device_get_max_priority(struct xe_device * xe) xe_exec_queue_device_get_max_priority() argument
473 exec_queue_set_priority(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_priority() argument
538 exec_queue_set_timeslice(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_timeslice() argument
555 exec_queue_set_pxp_type(struct xe_device * xe,struct xe_exec_queue * q,u64 value) exec_queue_set_pxp_type() argument
580 exec_queue_user_ext_set_property(struct xe_device * xe,struct xe_exec_queue * q,u64 extension) exec_queue_user_ext_set_property() argument
617 exec_queue_user_extensions(struct xe_device * xe,struct xe_exec_queue * q,u64 extensions,int ext_number) exec_queue_user_extensions() argument
650 calc_validate_logical_mask(struct xe_device * xe,struct drm_xe_engine_class_instance * eci,u16 width,u16 num_placements) calc_validate_logical_mask() argument
704 struct xe_device *xe = to_xe_device(dev); xe_exec_queue_create_ioctl() local
837 struct xe_device *xe = to_xe_device(dev); xe_exec_queue_get_property_ioctl() local
933 struct xe_device *xe = gt_to_xe(q->gt); xe_exec_queue_update_run_ticks() local
989 struct xe_device *xe = to_xe_device(dev); xe_exec_queue_destroy_ioctl() local
[all...]
xe_pagefault.c
24 * DOC: Xe page faults
26 * Xe page faults are handled in two layers. The producer layer interacts with
60 return 0; in xe_pagefault_begin()
78 needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic); in xe_pagefault_handle_vma()
79 if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma))) in xe_pagefault_handle_vma()
80 return needs_vram < 0 ? needs_vram : -EACCES; in xe_pagefault_handle_vma()
91 return 0; in xe_pagefault_handle_vma()
104 xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); in xe_pagefault_handle_vma()
142 static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid) in xe_pagefault_asid_to_vm() argument
146 down_read(&xe->usm.lock); in xe_pagefault_asid_to_vm()
[all …]
xe_gsc_submit.c
31 * as we use unique identifier for each user, with handle 0 being reserved for
61 * @xe: the Xe device
70 u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset, in xe_gsc_emit_header() argument
73 xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK)); in xe_gsc_emit_header()
78 xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE); in xe_gsc_emit_header()
80 mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER); in xe_gsc_emit_header()
81 mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id); in xe_gsc_emit_header()
82 mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id); in xe_gsc_emit_header()
83 mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION); in xe_gsc_emit_header()
84 mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE); in xe_gsc_emit_header()
[all …]
xe_vm.c
57 * Return: %0 on success. See drm_exec_lock_obj() for error codes.
97 return 0; in alloc_preempt_fences()
108 return 0; in alloc_preempt_fences()
114 bool vf_migration = IS_SRIOV_VF(vm->xe) && in wait_for_existing_preempt_fences()
115 xe_sriov_vf_migration_supported(vm->xe); in wait_for_existing_preempt_fences()
127 xe_assert(vm->xe, vf_migration); in wait_for_existing_preempt_fences()
132 if (timeout < 0 || q->lr.pfence->error == -ETIME) in wait_for_existing_preempt_fences()
140 return 0; in wait_for_existing_preempt_fences()
165 xe_assert(vm->xe, link != list); in arm_preempt_fences()
183 return 0; in add_preempt_fences()
429 struct xe_device *xe = vm->xe; vm_suspend_rebind_worker() local
1154 struct xe_device *xe = xe_vma_vm(vma)->xe; xe_vma_destroy_unlocked() local
1286 struct xe_device *xe = xe_bo_device(bo); pde_pat_index() local
1354 xelp_pte_encode_addr(struct xe_device * xe,u64 addr,u16 pat_index,u32 pt_level,bool devmem,u64 flags) xelp_pte_encode_addr() argument
1400 xe_vm_create_scratch(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,struct drm_exec * exec) xe_vm_create_scratch() argument
1456 xe_vm_create(struct xe_device * xe,u32 flags,struct xe_file * xef) xe_vm_create() argument
1671 struct xe_device *xe = vm->xe; xe_vm_close() local
1714 struct xe_device *xe = vm->xe; xe_vm_close_and_put() local
1824 struct xe_device *xe = vm->xe; vm_destroy_work_func() local
1906 struct xe_device *xe = to_xe_device(dev); xe_vm_create_ioctl() local
1974 struct xe_device *xe = to_xe_device(dev); xe_vm_destroy_ioctl() local
2043 struct xe_device *xe = to_xe_device(dev); xe_vm_query_vmas_attrs_ioctl() local
2153 print_op(struct xe_device * xe,struct drm_gpuva_op * op) print_op() argument
2194 print_op(struct xe_device * xe,struct drm_gpuva_op * op) print_op() argument
2587 struct xe_device *xe = vm->xe; vm_bind_ioctl_ops_parse() local
3316 vm_bind_ioctl_check_args(struct xe_device * xe,struct xe_vm * vm,struct drm_xe_vm_bind * args,struct drm_xe_vm_bind_op ** bind_ops) vm_bind_ioctl_check_args() argument
3487 xe_vm_bind_ioctl_validate_bo(struct xe_device * xe,struct xe_bo * bo,u64 addr,u64 range,u64 obj_offset,u16 pat_index,u32 op,u32 bind_flags) xe_vm_bind_ioctl_validate_bo() argument
3544 struct xe_device *xe = to_xe_device(dev); xe_vm_bind_ioctl() local
3954 struct xe_device *xe = xe_vma_vm(vma)->xe; xe_vm_invalidate_vma() local
4215 xe_vma_need_vram_for_atomic(struct xe_device * xe,struct xe_vma * vma,bool is_atomic) xe_vma_need_vram_for_atomic() argument
[all...]
xe_migrate.c
91 * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
93 * (val-2) format, this translates to 0x400 dwords for the true maximum length
95 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
97 #define MAX_PTE_PER_SDI 0x1FEU
123 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte) in xe_migrate_vram_ofs() argument
131 if (GRAPHICS_VER(xe) >= 20 && is_comp_pte) in xe_migrate_vram_ofs()
133 (xe->mem.vram), SZ_1G); in xe_migrate_vram_ofs()
135 addr -= xe_vram_region_dpa_base(xe->mem.vram); in xe_migrate_vram_ofs()
139 static void xe_migrate_program_identity(struct xe_device *xe, struc argument
187 struct xe_device *xe = tile_to_xe(tile); xe_migrate_prepare_vm() local
389 xe_migrate_needs_ccs_emit(struct xe_device * xe) xe_migrate_needs_ccs_emit() argument
413 struct xe_device *xe = tile_to_xe(tile); xe_migrate_lock_prepare_vm() local
439 struct xe_device *xe = tile_to_xe(tile); xe_migrate_init() local
516 max_mem_transfer_per_pass(struct xe_device * xe) max_mem_transfer_per_pass() argument
526 struct xe_device *xe = tile_to_xe(m->tile); xe_migrate_res_sizes() local
603 struct xe_device *xe = tile_to_xe(m->tile); emit_pte() local
667 struct xe_device *xe = gt_to_xe(gt); emit_copy_ccs() local
707 struct xe_device *xe = gt_to_xe(gt); emit_xy_fast_copy() local
781 struct xe_device *xe = gt_to_xe(gt); emit_copy() local
863 struct xe_device *xe = gt_to_xe(gt); xe_migrate_copy() local
1104 struct xe_device *xe = gt_to_xe(gt); xe_migrate_ccs_rw_copy() local
1235 struct xe_device *xe = xe_bo_device(vram_bo); xe_migrate_vram_copy_chunk() local
1339 struct xe_device *xe = gt_to_xe(gt); emit_clear_link_copy() local
1362 struct xe_device *xe = gt_to_xe(gt); emit_clear_main_copy() local
1457 struct xe_device *xe = gt_to_xe(gt); xe_migrate_clear() local
1713 struct xe_device *xe = tile_to_xe(tile); __xe_migrate_update_pgtables() local
2041 xe_migrate_copy_pitch(struct xe_device * xe,u32 len) xe_migrate_copy_pitch() argument
2069 struct xe_device *xe = gt_to_xe(gt); xe_migrate_vram() local
2229 xe_migrate_dma_unmap(struct xe_device * xe,struct drm_pagemap_addr * pagemap_addr,int len,int write) xe_migrate_dma_unmap() argument
2245 xe_migrate_dma_map(struct xe_device * xe,void * buf,int len,int write) xe_migrate_dma_map() argument
2307 struct xe_device *xe = tile_to_xe(tile); xe_migrate_access_memory() local
[all...]
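
The MAX_PTE_PER_SDI comment at the top of this excerpt compresses a few steps of arithmetic; spelled out as a standalone check (the intermediate macro names here are made up for illustration):

/* 0x3FE is encoded in (val-2) form, so the instruction body is 0x400 dwords long. */
#define SDI_TOTAL_DWORDS	(0x3FE + 2)			/* 0x400 */
/* One command dword and two address dwords leave 0x3FD dwords for PTEs. */
#define SDI_PTE_DWORDS		(SDI_TOTAL_DWORDS - 1 - 2)	/* 0x3FD */
/* PTEs are qwords, so integer division rounds this down to 0x1FE entries. */
_Static_assert(SDI_PTE_DWORDS / 2 == 0x1FE, "matches MAX_PTE_PER_SDI");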
/linux/drivers/gpu/drm/xe/tests/
xe_sriov_pf_service_kunit.c
19 struct xe_device *xe; in pf_service_test_init() local
24 xe = test->priv; in pf_service_test_init()
25 KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); in pf_service_test_init()
27 xe_sriov_pf_service_init(xe); in pf_service_test_init()
33 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major); in pf_service_test_init()
34 KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
35 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major, in pf_service_test_init()
36 xe->sriov.pf.service.version.latest.major); in pf_service_test_init()
37 if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) in pf_service_test_init()
38 KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor, in pf_service_test_init()
[all …]
xe_kunit_helpers.c
36 struct xe_device *xe; in xe_kunit_helper_alloc_xe_device() local
38 xe = drm_kunit_helper_alloc_drm_device(test, dev, in xe_kunit_helper_alloc_xe_device()
41 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_alloc_xe_device()
42 return xe; in xe_kunit_helper_alloc_xe_device()
69 * Return: Always 0.
73 struct xe_device *xe; in xe_kunit_helper_xe_device_test_init() local
80 xe = xe_kunit_helper_alloc_xe_device(test, dev); in xe_kunit_helper_xe_device_test_init()
81 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); in xe_kunit_helper_xe_device_test_init()
83 err = xe_pci_fake_device_init(xe); in xe_kunit_helper_xe_device_test_init()
84 KUNIT_ASSERT_EQ(test, err, 0); in xe_kunit_helper_xe_device_test_init()
[all …]
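
Going by this excerpt and the xe_sriov_pf_service_kunit.c one above, the helper appears intended as a KUnit init callback that hands the fake device back through test->priv; a sketch with made-up suite and test names, and the test->priv detail inferred rather than shown:

#include <kunit/test.h>

static void example_xe_test(struct kunit *test)
{
	/* Assumed: the init helper stashes the fake xe device in test->priv. */
	struct xe_device *xe = test->priv;

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
	/* ... exercise driver code against the fake device ... */
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_xe_test),
	{}
};

static struct kunit_suite example_suite = {
	.name = "xe_example",
	.init = xe_kunit_helper_xe_device_test_init,	/* "Return: Always 0" per the kernel-doc above */
	.test_cases = example_test_cases,
};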
xe_bo.c
53 if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) { in ccs_test_migrate()
74 if (timeout <= 0) { in ccs_test_migrate()
100 if (cpu_map[0] != get_val) { in ccs_test_migrate()
102 "Expected CCS readout 0x%016llx, got 0x%016llx.\n", in ccs_test_migrate()
104 (unsigned long long)cpu_map[0]); in ccs_test_migrate()
113 "Expected CCS readout 0x%016llx, got 0x%016llx.\n", in ccs_test_migrate()
119 cpu_map[0] = assign_val; in ccs_test_migrate()
126 static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, in ccs_test_run_tile() argument
137 if (IS_DGFX(xe)) in ccs_test_run_tile()
142 bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, in ccs_test_run_tile()
[all …]
/linux/Documentation/ABI/testing/
sysfs-driver-intel-xe-hwmon
1 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
4 Contact: intel-xe@lists.freedesktop.org
9 exceeds this limit. A read value of 0 means that the PL1
10 power limit is disabled, writing 0 disables the
11 limit. Writing values > 0 and <= TDP will enable the power limit.
13 Only supported for particular Intel Xe graphics platforms.
15 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
18 Contact: intel-xe@lists.freedesktop.org
21 Only supported for particular Intel Xe graphics platforms.
24 What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
[all …]
/linux/include/drm/intel/
xe_sriov_vfio.h
24 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
28 bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
32 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
33 * @vfid: the VF identifier (can't be 0)
38 * Return: 0 on success or a negative error code on failure.
40 int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);
44 * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
45 * @vfid: the VF identifier (can't be 0)
49 * Return: 0 on success or a negative error code on failure.
51 int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);
[all …]
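
The prototypes in this header excerpt are shown in full, so a caller sketch is straightforward; the wrapper function and the error code chosen here are hypothetical, and xe is the PF device obtained via xe_sriov_vfio_get_pf() as the kernel-doc states:

/* Hypothetical vfio-side helper that suspends one VF. */
static int example_suspend_vf(struct xe_device *xe, unsigned int vfid)
{
	if (!xe_sriov_vfio_migration_supported(xe))
		return -EOPNOTSUPP;

	/* vfid must not be 0; VF identifiers start at 1. */
	return xe_sriov_vfio_suspend_device(xe, vfid);
}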
/linux/drivers/gpu/drm/amd/include/asic_reg/umc/
umc_6_7_0_sh_mask.h
29 …C_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
30 …_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
31 …_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16
32 …_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18
33 …_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e
34 …_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
35 …_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26
36 …_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
37 …_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29
38 …_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
[all …]
