xref: /linux/drivers/gpu/drm/xe/xe_gt.h (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #ifndef _XE_GT_H_
7 #define _XE_GT_H_
8 
9 #include <linux/fault-inject.h>
10 
11 #include <drm/drm_util.h>
12 
13 #include "xe_device.h"
14 #include "xe_device_types.h"
15 #include "xe_gt_sriov_vf.h"
16 #include "xe_hw_engine.h"
17 
/*
 * for_each_hw_engine - iterate over the valid hardware engines of a GT
 * @hwe__: iteration cursor, set to each valid &struct xe_hw_engine in turn
 * @gt__: the GT whose hw_engines[] array is walked
 * @id__: integer loop index into hw_engines[]
 *
 * Walks every slot of gt->hw_engines[] but executes the loop body only
 * for entries that pass xe_hw_engine_is_valid(); invalid slots are
 * skipped via for_each_if().
 */
#define for_each_hw_engine(hwe__, gt__, id__) \
	for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
			  xe_hw_engine_is_valid((hwe__)))
22 
/*
 * XE_ENGINE_INSTANCES_FROM_MASK - extract one engine class' instance bits
 * @gt: the GT whose info.engine_mask is consulted
 * @NAME: engine class token (RCS, VCS, VECS, CCS, GSCCS)
 *
 * Masks gt->info.engine_mask down to the given class and shifts it so
 * bit 0 corresponds to instance 0 of that class.
 */
#define XE_ENGINE_INSTANCES_FROM_MASK(gt, NAME) \
	(((gt)->info.engine_mask & XE_HW_ENGINE_##NAME##_MASK) >> XE_HW_ENGINE_##NAME##0)

/* Per-class convenience wrappers: bitmask of present instances. */
#define RCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, RCS)
#define VCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, VCS)
#define VECS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, VECS)
#define CCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, CCS)
#define GSCCS_INSTANCES(gt) XE_ENGINE_INSTANCES_FROM_MASK(gt, GSCCS)
31 
32 /* Our devices have up to 4 media slices */
33 #define MAX_MEDIA_SLICES 4
34 
/*
 * GT_VER - IP version governing this GT
 * @gt: the GT to query (evaluated exactly once)
 *
 * Media GTs report the device's media IP version, all other GTs the
 * graphics IP version.
 */
#define GT_VER(gt) ({ \
	typeof(gt) gt_ = (gt); \
	struct xe_device *xe = gt_to_xe(gt_); \
	xe_gt_is_media_type(gt_) ? MEDIA_VER(xe) : GRAPHICS_VER(xe); \
})
40 
41 extern struct fault_attr gt_reset_failure;
42 static inline bool xe_fault_inject_gt_reset(void)
43 {
44 	return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&gt_reset_failure, 1);
45 }
46 
/* GT allocation and initialization entry points. */
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
53 
/**
 * xe_gt_record_user_engines - save data related to engines available to
 * userspace
 * @gt: GT structure
 *
 * Walk the available HW engines from gt->info.engine_mask and calculate data
 * related to those engines that may be used by userspace. To be used whenever
 * available engines change at runtime (e.g. with ccs_mode) or during
 * initialization.
 */
void xe_gt_record_user_engines(struct xe_gt *gt);
65 
/* Power management (suspend/resume) and reset/sanitize entry points. */
void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
/* Queues a reset worker; pair with xe_gt_wait_for_reset() to block on it. */
void xe_gt_reset_async(struct xe_gt *gt);
int xe_gt_runtime_resume(struct xe_gt *gt);
int xe_gt_runtime_suspend(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
75 
76 /**
77  * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
78  * @gt: GT structure
79  * Return:
80  * %true if it waited for the work to finish execution,
81  * %false if there was no scheduled reset or it was done.
82  */
83 static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
84 {
85 	return flush_work(&gt->reset.worker);
86 }
87 
88 /**
89  * xe_gt_reset - perform synchronous reset
90  * @gt: GT structure
91  * Return:
92  * %true if it waited for the reset to finish,
93  * %false if there was no scheduled reset.
94  */
95 static inline bool xe_gt_reset(struct xe_gt *gt)
96 {
97 	xe_gt_reset_async(gt);
98 	return xe_gt_wait_for_reset(gt);
99 }
100 
/**
 * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
 * first that matches the same reset domain as @class
 * @gt: GT structure
 * @class: hw engine class to lookup
 *
 * Return: matching engine, or NULL if none found — TODO confirm against
 * the definition in xe_gt.c.
 */
struct xe_hw_engine *
xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt, enum xe_engine_class class);

/**
 * xe_gt_any_hw_engine - scan the list of engines and return the
 * first available
 * @gt: GT structure
 */
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt);

/* Look up one engine by class and (logical or physical) instance number. */
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance,
				     bool logical);
121 
122 static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
123 {
124 	return gt->info.has_indirect_ring_state &&
125 	       xe_device_uc_enabled(gt_to_xe(gt));
126 }
127 
/* True when @gt is a main GT (info.type == XE_GT_TYPE_MAIN). */
static inline bool xe_gt_is_main_type(struct xe_gt *gt)
{
	return gt->info.type == XE_GT_TYPE_MAIN;
}
132 
/* True when @gt is a media GT (info.type == XE_GT_TYPE_MEDIA). */
static inline bool xe_gt_is_media_type(struct xe_gt *gt)
{
	return gt->info.type == XE_GT_TYPE_MEDIA;
}
137 
138 static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
139 {
140 	struct xe_device *xe = gt_to_xe(gt);
141 
142 	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
143 		hwe->instance == gt->usm.reserved_bcs_instance;
144 }
145 
146 /**
147  * xe_gt_recovery_pending() - GT recovery pending
148  * @gt: the &xe_gt
149  *
150  * Return: True if GT recovery in pending, False otherwise
151  */
152 static inline bool xe_gt_recovery_pending(struct xe_gt *gt)
153 {
154 	return IS_SRIOV_VF(gt_to_xe(gt)) &&
155 		xe_gt_sriov_vf_recovery_pending(gt);
156 }
157 
158 #endif
159