/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_GT_H_
#define _XE_GT_H_

#include <linux/fault-inject.h>

#include <drm/drm_util.h>

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_hw_engine.h"

#define for_each_hw_engine(hwe__, gt__, id__) \
	for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
			    xe_hw_engine_is_valid((hwe__)))
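
/*
 * Example (illustrative sketch, not taken from driver code): walk every
 * valid hardware engine on a GT. The enum xe_hw_engine_id type for @id is
 * an assumption and do_something() is a hypothetical helper.
 *
 *	struct xe_hw_engine *hwe;
 *	enum xe_hw_engine_id id;
 *
 *	for_each_hw_engine(hwe, gt, id)
 *		do_something(hwe);
 */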

#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)

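/*
 * GT reset fault injection: when enabled through the kernel's
 * fault-injection framework, should_fail() below makes
 * xe_fault_inject_gt_reset() report a simulated GT reset failure so that
 * reset error paths can be exercised.
 */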
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
	return should_fail(&gt_reset_failure, 1);
}

struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);

/**
 * xe_gt_record_user_engines - save data related to engines available to
 * userspace
 * @gt: GT structure
 *
 * Walk the available HW engines from gt->info.engine_mask and calculate data
 * related to those engines that may be used by userspace. To be used whenever
 * available engines change at runtime (e.g. with ccs_mode) or during
 * initialization.
 */
void xe_gt_record_user_engines(struct xe_gt *gt);
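
/*
 * Example (illustrative sketch, not taken from driver code): refresh the
 * user-visible engine data after the set of available engines changes at
 * runtime, e.g. once a new ccs_mode has been applied:
 *
 *	xe_gt_record_user_engines(gt);
 */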

void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);

/**
 * xe_gt_wait_for_reset - wait for gt's async reset to complete
 * @gt: GT structure
 * Return:
 * %true if it waited for the reset work to finish execution,
 * %false if there was no reset scheduled or it had already completed.
 */
static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
{
	return flush_work(&gt->reset.worker);
}
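
/*
 * Example (illustrative sketch, not taken from driver code): queue an
 * asynchronous reset and block later until the reset worker has run:
 *
 *	xe_gt_reset_async(gt);
 *	...
 *	if (!xe_gt_wait_for_reset(gt))
 *		drm_dbg(&gt_to_xe(gt)->drm, "reset already completed\n");
 */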

/**
 * xe_gt_reset - perform a synchronous GT reset
 * @gt: GT structure
 * Return:
 * %true if it waited for the reset to finish,
 * %false if there was no reset scheduled.
 */
static inline bool xe_gt_reset(struct xe_gt *gt)
{
	xe_gt_reset_async(gt);
	return xe_gt_wait_for_reset(gt);
}
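
/*
 * Example (illustrative sketch, not taken from driver code): force a
 * synchronous reset, e.g. from a test or debug path; the return value
 * reports whether the flush actually had to wait for the reset worker:
 *
 *	bool waited = xe_gt_reset(gt);
 */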

/**
 * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
 * first that matches the same reset domain as @class
 * @gt: GT structure
 * @class: hw engine class to look up
 */
struct xe_hw_engine *
xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt, enum xe_engine_class class);

/**
 * xe_gt_any_hw_engine - scan the list of engines and return the first available
 * @gt: GT structure
 */
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt);
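
/*
 * Example (illustrative sketch, not taken from driver code): pick any usable
 * engine on the GT and bail out if none is available; the -ENODEV handling
 * is hypothetical and only for illustration:
 *
 *	struct xe_hw_engine *hwe = xe_gt_any_hw_engine(gt);
 *
 *	if (!hwe)
 *		return -ENODEV;
 */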

struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
                                     enum xe_engine_class class,
                                     u16 instance,
                                     bool logical);
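
/*
 * Example (illustrative sketch, not taken from driver code): look up a
 * specific engine by class and logical instance; presumably NULL is returned
 * when no such engine exists:
 *
 *	struct xe_hw_engine *hwe =
 *		xe_gt_hw_engine(gt, XE_ENGINE_CLASS_RENDER, 0, true);
 */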

static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
{
	return gt->info.has_indirect_ring_state &&
	       xe_device_uc_enabled(gt_to_xe(gt));
}

static inline bool xe_gt_is_media_type(struct xe_gt *gt)
{
	return gt->info.type == XE_GT_TYPE_MEDIA;
}

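/*
 * @hwe is the USM hw engine when the device supports USM and @hwe is the
 * copy engine instance the GT reserves for USM work
 * (gt->usm.reserved_bcs_instance).
 */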
static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
{
	struct xe_device *xe = gt_to_xe(gt);

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}

#endif