Searched refs:gt_to_xe (Results 1 – 25 of 35) sorted by relevance

/linux/drivers/gpu/drm/xe/
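All hits below are in the Xe driver and exercise the gt_to_xe() helper, which resolves a GT pointer (struct xe_gt *) to the struct xe_device that owns it. Device-wide state such as SR-IOV mode (IS_SRIOV_PF()/IS_SRIOV_VF()), graphics IP version (GRAPHICS_VER()) and runtime PM hangs off the device rather than the GT, which is why so many GT-scoped functions begin with this conversion. A minimal sketch of the recurring pattern, assembled from the hits below rather than copied from any single file (example_helper is a hypothetical name):

/*
 * Hypothetical helper illustrating the pattern seen throughout the
 * hits below; assumes the usual xe driver headers are in scope.
 */
static void example_helper(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);	/* GT -> owning device */

	if (IS_SRIOV_VF(xe))			/* device-level mode check */
		return;

	drm_dbg(&xe->drm, "GT%u on %s\n",	/* device-level logging */
		gt->info.id, dev_name(xe->drm.dev));
}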
xe_gt_sriov_pf_migration.c
34 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_pick_gt_migration()
36 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in pf_pick_gt_migration()
71 data = xe_sriov_packet_alloc(gt_to_xe(gt)); in pf_save_vf_ggtt_mig_data()
126 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_migration_ggtt_save()
128 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in xe_gt_sriov_pf_migration_ggtt_save()
146 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_migration_ggtt_restore()
148 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt))); in xe_gt_sriov_pf_migration_ggtt_restore()
237 return xe_sriov_pf_migration_supported(gt_to_xe(gt)); in pf_migration_supported()
252 data = xe_sriov_packet_alloc(gt_to_xe(gt)); in pf_save_vf_guc_mig_data()
313 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_migration_guc_save()
[all …]
xe_gt.c
146 struct xe_device *xe = gt_to_xe(gt); in xe_gt_enable_comp_1wcoh()
375 ret = emit_job_sync(q, bb, HZ, IS_SRIOV_VF(gt_to_xe(gt))); in emit_wa_job()
384 struct xe_device *xe = gt_to_xe(gt); in xe_gt_record_default_lrcs()
460 struct xe_device *xe = gt_to_xe(gt); in wa_14026539277()
491 if (IS_SRIOV_PF(gt_to_xe(gt))) { in xe_gt_init_early()
497 if (IS_SRIOV_VF(gt_to_xe(gt))) { in xe_gt_init_early()
503 xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt)); in xe_gt_init_early()
555 p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix); in dump_pat_on_error()
581 if (IS_SRIOV_PF(gt_to_xe(gt))) in gt_init_with_gt_forcewake()
658 if (gt_to_xe(gt)->info.has_usm) { in gt_init_with_all_forcewake()
[all …]
xe_gt_sriov_pf_helpers.h
23 #define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
27 return xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); in xe_gt_sriov_pf_get_totalvfs()
32 return xe_sriov_pf_master_mutex(gt_to_xe(gt)); in xe_gt_sriov_pf_master_mutex()
xe_tlb_inval_job.c
141 xe_pm_runtime_get_noresume(gt_to_xe(q->gt)); in xe_tlb_inval_job_create()
166 struct xe_device *xe = gt_to_xe(job->q->gt); in xe_tlb_inval_job_add_page_reclaim()
181 struct xe_device *xe = gt_to_xe(q->gt); in xe_tlb_inval_job_destroy()
212 xe_assert(gt_to_xe(job->q->gt), !xa_load(&job->dep.drm.dependencies, 0)); in xe_tlb_inval_job_alloc_dep()
248 xe_assert(gt_to_xe(job->q->gt), in xe_tlb_inval_job_push()
255 xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr)); in xe_tlb_inval_job_push()
xe_gt_sriov_pf_monitor.c
28 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_monitor_flr()
38 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in pf_update_event_counter()
80 struct xe_device *xe = gt_to_xe(gt); in xe_gt_sriov_pf_monitor_process_guc2pf()
124 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_monitor_print_events()
xe_hw_engine.c
340 if (xe_device_has_msix(gt_to_xe(hwe->gt))) in xe_hw_engine_enable_ring()
356 if (GRAPHICS_VER(gt_to_xe(gt)) >= 35) in xe_hw_engine_match_fixed_cslice_mode()
419 struct xe_device *xe = gt_to_xe(gt); in hw_engine_setup_default_state()
523 hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ? in hw_engine_init_early()
572 xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt)); in hw_engine_init_early()
577 xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt)); in hw_engine_init_early()
589 if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) { in adjust_idledly()
610 struct xe_device *xe = gt_to_xe(gt); in hw_engine_init()
678 struct xe_device *xe = gt_to_xe(gt); in read_media_fuses()
745 struct xe_device *xe = gt_to_xe(gt); in read_copy_fuses()
[all …]
xe_gt_ccs_mode.c
23 struct xe_device *xe = gt_to_xe(gt); in __xe_gt_apply_ccs_mode()
87 if (!gt->ccs_mode || IS_SRIOV_VF(gt_to_xe(gt))) in xe_gt_apply_ccs_mode()
123 struct xe_device *xe = gt_to_xe(gt); in ccs_mode_store()
207 struct xe_device *xe = gt_to_xe(gt); in xe_gt_ccs_mode_sysfs_init()
xe_gt_sriov_pf_debugfs.c
120 struct xe_device *xe = gt_to_xe(gt); \
256 guard(xe_pm_runtime)(gt_to_xe(gt)); in sched_groups_write()
328 guard(xe_pm_runtime)(gt_to_xe(gt)); in sched_groups_config_write()
478 struct xe_device *xe = gt_to_xe(gt); \
537 struct xe_device *xe = gt_to_xe(gt); in set_threshold()
629 struct xe_device *xe = gt_to_xe(gt); in control_write()
786 struct xe_device *xe = gt_to_xe(gt); in pf_add_compat_attrs()
808 struct xe_device *xe = gt_to_xe(gt); in pf_populate_gt()
852 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); in xe_gt_sriov_pf_debugfs_populate()
854 xe_gt_assert(gt, extract_priv(parent->d_parent) == gt_to_xe(gt) || in xe_gt_sriov_pf_debugfs_populate()
[all …]
xe_gsc.c
48 struct xe_device *xe = gt_to_xe(gt); in memcpy_fw()
132 struct xe_device *xe = gt_to_xe(gt); in query_compatibility_version()
205 struct xe_device *xe = gt_to_xe(gt); in gsc_upload()
343 xe_device_declare_wedged(gt_to_xe(gt)); in gsc_er_complete()
354 struct xe_device *xe = gt_to_xe(gt); in gsc_work()
425 else if (gt_to_xe(gt)->info.platform >= XE_BATTLEMAGE && !xe_uc_fw_is_available(&gsc->fw)) in xe_gsc_init()
460 struct xe_device *xe = gt_to_xe(gt); in xe_gsc_init_post_hwconfig()
517 struct xe_device *xe = gt_to_xe(gt); in xe_gsc_load_start()
xe_gt_throttle.c
86 struct xe_device *xe = gt_to_xe(gt); in xe_gt_throttle_get_limit_reasons()
124 struct xe_device *xe = gt_to_xe(gt); in reasons_show()
250 struct xe_device *xe = gt_to_xe(gt); in gt_throttle_sysfs_fini()
258 struct xe_device *xe = gt_to_xe(gt); in xe_gt_throttle_init()
xe_lrc.c
81 return gt_to_xe(lrc->fence_ctx.gt); in lrc_to_xe()
87 struct xe_device *xe = gt_to_xe(gt); in gt_engine_needs_indirect_ctx()
115 struct xe_device *xe = gt_to_xe(gt); in xe_gt_lrc_hang_replay_size()
660 struct xe_device *xe = gt_to_xe(hwe->gt); in set_memory_based_intr()
687 struct xe_device *xe = gt_to_xe(hwe->gt); in lrc_ring_mi_mode()
758 return xe_gt_lrc_hang_replay_size(gt, class) - xe_lrc_reg_size(gt_to_xe(gt)); in xe_lrc_engine_state_size()
987 set_offsets(regs, reg_offsets(gt_to_xe(gt), hwe->class), hwe); in empty_lrc_data()
1031 if (!xe_device_uses_memirq(gt_to_xe(gt))) in xe_lrc_update_memirq_regs_with_address()
1035 regs_len = xe_lrc_reg_size(gt_to_xe(gt)); in xe_lrc_update_memirq_regs_with_address()
1036 xe_map_memcpy_from(gt_to_xe(gt), regs, &map, 0, regs_len); in xe_lrc_update_memirq_regs_with_address()
[all …]
xe_guc.c
183 if (CCS_INSTANCES(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) in needs_wa_dual_queue()
298 struct xe_device *xe = gt_to_xe(gt); in guc_action_register_g2g_buffer()
318 struct xe_device *xe = gt_to_xe(gt); in guc_action_deregister_g2g_buffer()
408 struct xe_device *xe = gt_to_xe(near_gt); in guc_g2g_register()
419 xe_assert(xe, xe == gt_to_xe(far_gt)); in guc_g2g_register()
446 struct xe_device *xe = gt_to_xe(gt); in guc_g2g_size()
468 struct xe_device *xe = gt_to_xe(gt); in guc_g2g_alloc()
522 struct xe_device *xe = gt_to_xe(gt); in guc_g2g_start()
964 if (IS_SRIOV_VF(gt_to_xe(gt))) in xe_guc_reset()
1664 if (IS_SRIOV_VF(gt_to_xe(gt))) in xe_guc_sw_0_irq_handler()
[all …]
xe_gsc_proxy.c
224 struct xe_device *xe = gt_to_xe(gt); in proxy_query()
415 struct xe_device *xe = gt_to_xe(gt); in proxy_channel_alloc()
441 struct xe_device *xe = gt_to_xe(gt); in xe_gsc_proxy_stop()
461 struct xe_device *xe = gt_to_xe(gt); in xe_gsc_proxy_remove()
xe_oa.c
1660 xe_pm_runtime_get(gt_to_xe(gt)); in xe_oa_release()
1664 xe_pm_runtime_put(gt_to_xe(gt)); in xe_oa_release()
1667 drm_dev_put(&gt_to_xe(gt)->drm); in xe_oa_release()
1904 xe_pm_runtime_get(gt_to_xe(gt)); in xe_oa_timestamp_frequency()
1906 xe_pm_runtime_put(gt_to_xe(gt)); in xe_oa_timestamp_frequency()
2511 if (xe_gt_is_main_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20) in num_oa_units_per_gt()
2517 return xe_device_has_mert(gt_to_xe(gt)) ? 2 : 1; in num_oa_units_per_gt()
2518 else if (!IS_DGFX(gt_to_xe(gt))) in num_oa_units_per_gt()
2526 if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270) in __hwe_oam_unit()
2531 if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20) in __hwe_oam_unit()
[all …]
xe_eu_stall.c
235 struct xe_device *xe = gt_to_xe(gt); in xe_eu_stall_init()
438 struct xe_device *xe = gt_to_xe(gt); in clear_dropped_eviction_line_bit()
667 xe_pm_runtime_get(gt_to_xe(gt)); in xe_eu_stall_stream_enable()
671 xe_pm_runtime_put(gt_to_xe(gt)); in xe_eu_stall_stream_enable()
760 stream->data_record_size = xe_eu_stall_data_record_size(gt_to_xe(gt)); in xe_eu_stall_stream_init()
836 xe_pm_runtime_put(gt_to_xe(gt)); in xe_eu_stall_disable_locked()
xe_exec_queue.c
150 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
276 xe_assert(gt_to_xe(q->gt), idx < q->width); in xe_exec_queue_set_lrc()
297 xe_assert(gt_to_xe(q->gt), idx < q->width); in xe_exec_queue_get_lrc()
347 if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20) in __xe_exec_queue_init()
580 xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0); in xe_exec_queue_destroy()
586 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in xe_exec_queue_destroy()
1238 if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_sriov_pf_sched_groups_enabled(gt)) in has_sched_groups()
1241 if (IS_SRIOV_VF(gt_to_xe(gt)) && xe_gt_sriov_vf_sched_groups_enabled(gt)) in has_sched_groups()
1533 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks()
1766 xe_assert(gt_to_xe(q->gt), type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT || in xe_exec_queue_tlb_inval_last_fence_put_unlocked()
xe_ring_ops.c
331 i = emit_copy_timestamp(gt_to_xe(gt), lrc, dw, i); in __emit_job_gen12_simple()
384 struct xe_device *xe = gt_to_xe(gt); in __emit_job_gen12_video()
437 struct xe_device *xe = gt_to_xe(gt); in __emit_job_gen12_render_compute()
498 struct xe_device *xe = gt_to_xe(gt); in emit_migration_job_gen12()
622 struct xe_device *xe = gt_to_xe(gt); in xe_ring_ops_get()
xe_pagefault.c
173 struct xe_device *xe = gt_to_xe(gt); in xe_pagefault_service()
309 xe_pagefault_save_to_vm(gt_to_xe(pf.gt), &pf); in xe_pagefault_queue_work()
324 queue_work(gt_to_xe(pf.gt)->usm.pf_wq, w); in xe_pagefault_queue_work()
xe_gt_sysfs.c
35 struct xe_device *xe = gt_to_xe(gt); in xe_gt_sysfs_init()
xe_trace_lrc.h
19 #define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
xe_gt_freq.c
63 return gt_to_xe(kobj_to_gt(dev->kobj.parent)); in dev_to_xe()
276 struct xe_device *xe = gt_to_xe(gt); in xe_gt_freq_init()
xe_guc_ads.c
51 return gt_to_xe(ads_to_gt(ads)); in ads_to_xe()
462 struct xe_device *xe = gt_to_xe(gt); in fill_engine_enable_masks()
528 struct xe_device *xe = gt_to_xe(gt); in guc_mapping_table_init_invalid()
541 struct xe_device *xe = gt_to_xe(gt); in guc_mapping_table_init()
560 struct xe_device *xe = gt_to_xe(gt); in guc_get_capture_engine_mask()
xe_gt_types.h
100 #define gt_to_xe(gt__) \ (macro definition)
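This hit is the definition site; the search view truncates everything after the backslash continuation. In recent kernels the macro follows gt->tile->xe and uses _Generic() so that the constness of the GT pointer is preserved in the returned device pointer; roughly along these lines (a from-memory sketch of the upstream definition, not a verbatim copy of line 100):

#define gt_to_xe(gt__) \
	_Generic(gt__, \
		 const struct xe_gt * : (const struct xe_device *)((gt__)->tile->xe), \
		 struct xe_gt * : (gt__)->tile->xe)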
xe_migrate.c
695 struct xe_device *xe = gt_to_xe(gt); in emit_copy_ccs()
710 num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size), in emit_copy_ccs()
735 struct xe_device *xe = gt_to_xe(gt); in emit_xy_fast_copy()
809 struct xe_device *xe = gt_to_xe(gt); in emit_copy()
873 struct xe_device *xe = gt_to_xe(gt); in __xe_migrate_copy()
1168 struct xe_device *xe = gt_to_xe(gt); in xe_migrate_ccs_rw_copy()
1466 struct xe_device *xe = gt_to_xe(gt); in emit_clear_link_copy()
1489 struct xe_device *xe = gt_to_xe(gt); in emit_clear_main_copy()
1570 struct xe_device *xe = gt_to_xe(gt); in xe_migrate_clear()
2183 struct xe_device *xe = gt_to_xe(gt); in xe_migrate_vram()
xe_guc.h
98 return gt_to_xe(guc_to_gt(guc)); in guc_to_xe()
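Together with ads_to_xe() and lrc_to_xe() above, this last hit shows the other recurring idiom: per-component converters chain through their parent GT rather than reimplementing the hop to the device. A sketch of how guc_to_xe() plausibly composes, where the container_of() field path (uc.guc) is an assumption about how xe_guc is embedded in xe_gt:

static inline struct xe_gt *guc_to_gt(struct xe_guc *guc)
{
	/* assumed embedding: struct xe_gt contains struct xe_uc uc, which contains guc */
	return container_of(guc, struct xe_gt, uc.guc);
}

static inline struct xe_device *guc_to_xe(struct xe_guc *guc)
{
	return gt_to_xe(guc_to_gt(guc));	/* GT hop, then device hop */
}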
