/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_GT_H_
#define _XE_GT_H_

#include <linux/fault-inject.h>

#include <drm/drm_util.h>

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_hw_engine.h"

#define for_each_hw_engine(hwe__, gt__, id__) \
	for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
		for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
			    xe_hw_engine_is_valid((hwe__)))

#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)

extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
	return should_fail(&gt_reset_failure, 1);
}

struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
void xe_gt_mmio_init(struct xe_gt *gt);
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);

/**
 * xe_gt_record_user_engines - save data related to engines available to
 * userspace
 * @gt: GT structure
 *
 * Walk the available HW engines from gt->info.engine_mask and calculate data
 * related to those engines that may be used by userspace. To be used whenever
 * available engines change at runtime (e.g. with ccs_mode) or during
 * initialization.
 */
void xe_gt_record_user_engines(struct xe_gt *gt);

void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);

/**
 * xe_gt_wait_for_reset - wait for the GT's async reset to finalize.
 * @gt: GT structure
 * Return:
 * %true if it waited for the work to finish execution,
 * %false if there was no scheduled reset or it was already done.
 */
static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
{
	return flush_work(&gt->reset.worker);
}

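/*
 * Usage sketch (illustrative only): example_count_valid_engines() below is a
 * hypothetical caller, not part of this header. It shows how a driver path
 * might flush a pending asynchronous reset with xe_gt_wait_for_reset() and
 * then walk the GT's populated engines with for_each_hw_engine(), which only
 * visits the valid entries of gt->hw_engines.
 *
 *	static int example_count_valid_engines(struct xe_gt *gt)
 *	{
 *		struct xe_hw_engine *hwe;
 *		enum xe_hw_engine_id id;
 *		int count = 0;
 *
 *		// Wait for any reset previously queued via xe_gt_reset_async().
 *		xe_gt_wait_for_reset(gt);
 *
 *		// Iterate the valid engines only.
 *		for_each_hw_engine(hwe, gt, id)
 *			count++;
 *
 *		return count;
 *	}
 */
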
/**
 * xe_gt_reset - perform synchronous reset
 * @gt: GT structure
 * Return:
 * %true if it waited for the reset to finish,
 * %false if there was no scheduled reset.
 */
static inline bool xe_gt_reset(struct xe_gt *gt)
{
	xe_gt_reset_async(gt);
	return xe_gt_wait_for_reset(gt);
}

/**
 * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
 * first that matches the same reset domain as @class
 * @gt: GT structure
 * @class: hw engine class to lookup
 */
struct xe_hw_engine *
xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt, enum xe_engine_class class);

/**
 * xe_gt_any_hw_engine - scan the list of engines and return the
 * first available
 * @gt: GT structure
 */
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt);

struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance,
				     bool logical);

static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt)
{
	return gt->info.has_indirect_ring_state &&
	       xe_device_uc_enabled(gt_to_xe(gt));
}

static inline bool xe_gt_is_media_type(struct xe_gt *gt)
{
	return gt->info.type == XE_GT_TYPE_MEDIA;
}

static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
{
	struct xe_device *xe = gt_to_xe(gt);

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
	       hwe->instance == gt->usm.reserved_bcs_instance;
}

#endif
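
/*
 * Usage sketch (illustrative only): example_is_reserved_copy_engine() is a
 * hypothetical caller, not part of this header. It shows how xe_gt_hw_engine()
 * can resolve an engine by class and instance, and how xe_gt_is_usm_hwe()
 * identifies the copy engine instance reserved for USM on devices where
 * xe->info.has_usm is set.
 *
 *	static bool example_is_reserved_copy_engine(struct xe_gt *gt, u16 instance)
 *	{
 *		struct xe_hw_engine *hwe;
 *
 *		// Resolve by class and instance; 'false' requests non-logical
 *		// instance numbering. NULL means the engine is not present.
 *		hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_COPY, instance, false);
 *
 *		return hwe && xe_gt_is_usm_hwe(gt, hwe);
 *	}
 */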