/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_MQD_MANAGER_H_
#define KFD_MQD_MANAGER_H_

#include "kfd_priv.h"

/* Maximum shader engines (SE) and SHs per SE covered by the per-SE masks. */
#define KFD_MAX_NUM_SE 8
#define KFD_MAX_NUM_SH_PER_SE 2

/**
 * struct mqd_manager
 *
 * @allocate_mqd: Allocates the memory object (kfd_mem_obj) that backs the
 * MQD for a queue with the given properties.
 *
 * @init_mqd: Allocates the mqd buffer on local gpu memory and initializes it.
 *
 * @load_mqd: Loads the mqd to a concrete hqd slot. Used only for no cp
 * scheduling mode.
 *
 * @update_mqd: Handles an update call for the MQD.
 *
 * @destroy_mqd: Destroys the HQD slot and by that preempts the relevant queue.
 * Used only for no cp scheduling.
 *
 * @free_mqd: Releases the mqd buffer from local gpu memory.
 *
 * @is_occupied: Checks if the relevant HQD slot is occupied.
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 *
 * @get_checkpoint_info: Reports the control stack size through
 * @ctl_stack_size so callers can size checkpoint buffers.
 *
 * @checkpoint_mqd: Copies the MQD image and control stack into the
 * caller-provided @mqd_dst / @ctl_stack_dst buffers (checkpoint path).
 *
 * @restore_mqd: Re-creates an MQD from previously checkpointed MQD and
 * control-stack images (counterpart of @checkpoint_mqd).
 *
 * @debugfs_show_mqd: Dumps the MQD to a debugfs seq_file. Only present on
 * CONFIG_DEBUG_FS builds.
 *
 * @check_preemption_failed: Presumably reports whether preempting the queue
 * backed by this MQD failed -- confirm against the per-ASIC implementations.
 *
 * @mqd_stride: Distance in bytes between consecutive MQD instances for the
 * given queue properties. NOTE(review): likely the per-XCC stride, inferred
 * from the *_xcc helpers below -- confirm.
 *
 * @mqd_mutex: Mqd manager mutex.
 *
 * @dev: The kfd device structure coupled with this module.
 *
 * @mqd_size: Size in bytes of a single MQD managed by this manager.
 *
 * MQD stands for Memory Queue Descriptor which represents the current queue
 * state in memory and initiates the HQD (Hardware Queue Descriptor) state.
 * This structure is actually a base class for the different types of MQD
 * structures for the variant ASICs that should be supported in the future.
 * This base class also contains all the MQD specific operations.
 * Another important thing to mention is that each queue has a MQD that keeps
 * its state (or context) after each preemption or reassignment.
 * Basically there is an instance of the mqd manager class per MQD type per
 * ASIC: the driver creates one instance per KFD_MQD_TYPE for each device.
 */
extern int pipe_priority_map[];
struct mqd_manager {
	struct kfd_mem_obj*	(*allocate_mqd)(struct mqd_manager *mm,
					struct queue_properties *q);

	void	(*init_mqd)(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q);

	int	(*load_mqd)(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p,
			struct mm_struct *mms);

	void	(*update_mqd)(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo);

	int	(*destroy_mqd)(struct mqd_manager *mm, void *mqd,
				enum kfd_preempt_type type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	void	(*free_mqd)(struct mqd_manager *mm, void *mqd,
				struct kfd_mem_obj *mqd_mem_obj);

	bool	(*is_occupied)(struct mqd_manager *mm, void *mqd,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id);

	int	(*get_wave_state)(struct mqd_manager *mm, void *mqd,
				  struct queue_properties *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);

	int	(*get_checkpoint_info)(struct mqd_manager *mm, void *mqd,
				       uint32_t *ctl_stack_size);

	void	(*checkpoint_mqd)(struct mqd_manager *mm,
				  void *mqd,
				  void *mqd_dst,
				  void *ctl_stack_dst);

	void	(*restore_mqd)(struct mqd_manager *mm, void **mqd,
				struct kfd_mem_obj *mqd_mem_obj,
				uint64_t *gart_addr,
				struct queue_properties *p,
				const void *mqd_src,
				const void *ctl_stack_src,
				const u32 ctl_stack_size);

#if defined(CONFIG_DEBUG_FS)
	int	(*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
	bool	(*check_preemption_failed)(struct mqd_manager *mm, void *mqd);
	uint64_t (*mqd_stride)(struct mqd_manager *mm,
				struct queue_properties *p);

	struct mutex	mqd_mutex;
	struct kfd_node	*dev;
	uint32_t mqd_size;
};

/*
 * Header laid out at the start of the user-mode context save area.
 * Written so user space can locate the most recently saved control stack
 * and wave state; every offset/size field is 4-byte aligned.
 */
struct mqd_user_context_save_area_header {
	/* Byte offset from start of user context
	 * save area to the last saved top (lowest
	 * address) of control stack data. Must be
	 * 4 byte aligned.
	 */
	uint32_t control_stack_offset;

	/* Byte size of the last saved control stack
	 * data. Must be 4 byte aligned.
	 */
	uint32_t control_stack_size;

	/* Byte offset from start of user context save
	 * area to the last saved base (lowest address)
	 * of wave state data. Must be 4 byte aligned.
	 */
	uint32_t wave_state_offset;

	/* Byte size of the last saved wave state data.
	 * Must be 4 byte aligned.
	 */
	uint32_t wave_state_size;
};

/*
 * Shared helpers below are used by the per-ASIC mqd_manager implementations
 * to fill the function pointers above (HIQ/SDMA variants and CP defaults).
 */

/* MQD allocation for the HIQ (hardware interface queue). */
struct kfd_mem_obj *allocate_hiq_mqd(struct mqd_manager *mm,
				struct queue_properties *q);

/* MQD allocation for SDMA queues. */
struct kfd_mem_obj *allocate_sdma_mqd(struct mqd_manager *mm,
					struct queue_properties *q);
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
				struct kfd_mem_obj *mqd_mem_obj);

/*
 * Spreads the given CU mask across the shader engines, producing one mask
 * per SE in @se_mask (array sized KFD_MAX_NUM_SE) for instance @inst.
 */
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
		const uint32_t *cu_mask, uint32_t cu_mask_count,
		uint32_t *se_mask, uint32_t inst);

/* Default load_mqd for the HIQ, going through the KIQ. */
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms);

/* Default destroy/free/is_occupied implementations for CP queues. */
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id);

void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
		struct kfd_mem_obj *mqd_mem_obj);

bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
		uint64_t queue_address, uint32_t pipe_id,
		uint32_t queue_id);

/* Default load/destroy/is_occupied implementations for SDMA queues. */
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms);

int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id);

bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
		uint64_t queue_address, uint32_t pipe_id,
		uint32_t queue_id);

/* Locates the HIQ MQD for one virtual XCC inside a multi-XCC allocation. */
void kfd_get_hiq_xcc_mqd(struct kfd_node *dev,
		struct kfd_mem_obj *mqd_mem_obj, uint32_t virtual_xcc_id);

uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev);
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
			struct queue_properties *q);
bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id,
				   uint32_t inst);
#endif /* KFD_MQD_MANAGER_H_ */