xref: /linux/drivers/gpu/drm/xe/xe_gt.h (revision 74ba587f402d5501af2c85e50cf1e4044263b6ca)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #ifndef _XE_GT_H_
7 #define _XE_GT_H_
8 
9 #include <linux/fault-inject.h>
10 
11 #include <drm/drm_util.h>
12 
13 #include "xe_device.h"
14 #include "xe_device_types.h"
15 #include "xe_gt_sriov_vf.h"
16 #include "xe_hw_engine.h"
17 
/*
 * for_each_hw_engine - iterate over the valid hardware engines of a GT
 * @hwe__: iteration cursor (struct xe_hw_engine *)
 * @gt__: GT whose hw_engines array is walked
 * @id__: engine id, used as the loop counter
 *
 * Walks every slot of gt->hw_engines but only enters the loop body for
 * slots that pass xe_hw_engine_is_valid() (the for_each_if() filter).
 */
#define for_each_hw_engine(hwe__, gt__, id__) \
	for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
			  xe_hw_engine_is_valid((hwe__)))
22 
/* Mask of the GT's compute (CCS) engines, normalized so that bit 0 == CCS0 */
#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
24 
/*
 * GT_VER - IP version of a GT: the media version for media GTs, the
 * graphics version otherwise. Statement expression so @gt is evaluated
 * exactly once.
 */
#define GT_VER(gt) ({ \
	typeof(gt) gt_ = (gt); \
	struct xe_device *xe = gt_to_xe(gt_); \
	xe_gt_is_media_type(gt_) ? MEDIA_VER(xe) : GRAPHICS_VER(xe); \
})
30 
31 extern struct fault_attr gt_reset_failure;
32 static inline bool xe_fault_inject_gt_reset(void)
33 {
34 	return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&gt_reset_failure, 1);
35 }
36 
/* GT construction and initialization entry points (presumably in xe_gt.c) */
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
/* Mark the GT as wedged (unusable) -- see the implementation for semantics */
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
43 
/**
 * xe_gt_record_user_engines - save data related to engines available to
 * userspace
 * @gt: GT structure
 *
 * Walk the available HW engines from gt->info.engine_mask and calculate data
 * related to those engines that may be used by userspace. To be used whenever
 * available engines change in runtime (e.g. with ccs_mode) or during
 * initialization
 */
void xe_gt_record_user_engines(struct xe_gt *gt);

/* Power-management / lifecycle hooks */
void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
/* Queue an asynchronous GT reset; pair with xe_gt_wait_for_reset() to sync */
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
63 
64 /**
65  * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
66  * @gt: GT structure
67  * Return:
68  * %true if it waited for the work to finish execution,
69  * %false if there was no scheduled reset or it was done.
70  */
71 static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
72 {
73 	return flush_work(&gt->reset.worker);
74 }
75 
76 /**
77  * xe_gt_reset - perform synchronous reset
78  * @gt: GT structure
79  * Return:
80  * %true if it waited for the reset to finish,
81  * %false if there was no scheduled reset.
82  */
83 static inline bool xe_gt_reset(struct xe_gt *gt)
84 {
85 	xe_gt_reset_async(gt);
86 	return xe_gt_wait_for_reset(gt);
87 }
88 
/**
 * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
 * first that matches the same reset domain as @class
 * @gt: GT structure
 * @class: hw engine class to lookup
 */
struct xe_hw_engine *
xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt, enum xe_engine_class class);

/**
 * xe_gt_any_hw_engine - scan the list of engines and return the
 * first available
 * @gt: GT structure
 */
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt);

/*
 * Look up one engine on @gt by class and instance number; @logical
 * presumably selects logical vs. physical instance numbering -- confirm
 * against the implementation before relying on it.
 */
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance,
				     bool logical);
109 
110 static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
111 {
112 	return gt->info.has_indirect_ring_state &&
113 	       xe_device_uc_enabled(gt_to_xe(gt));
114 }
115 
116 static inline bool xe_gt_is_main_type(struct xe_gt *gt)
117 {
118 	return gt->info.type == XE_GT_TYPE_MAIN;
119 }
120 
121 static inline bool xe_gt_is_media_type(struct xe_gt *gt)
122 {
123 	return gt->info.type == XE_GT_TYPE_MEDIA;
124 }
125 
126 static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
127 {
128 	struct xe_device *xe = gt_to_xe(gt);
129 
130 	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
131 		hwe->instance == gt->usm.reserved_bcs_instance;
132 }
133 
/**
 * xe_gt_recovery_pending() - GT recovery pending
 * @gt: the &xe_gt
 *
 * Return: True if GT recovery is pending, False otherwise
 */
static inline bool xe_gt_recovery_pending(struct xe_gt *gt)
{
	/* Recovery only applies to SR-IOV VFs; short-circuits on other modes */
	return IS_SRIOV_VF(gt_to_xe(gt)) &&
		xe_gt_sriov_vf_recovery_pending(gt);
}
145 
146 #endif
147