/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>
#include "amdgpu_irq.h"
#include "amdgpu_gfx.h"

struct pci_dev;
struct amdgpu_device;

struct kfd_dev;
struct kgd_mem;

/* How a queue's wavefronts are handled when the queue is preempted. */
enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
	KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
};

/* Decoded information about a GPU VM fault, filled in from the fault
 * status registers for reporting to user space.
 */
struct kfd_vm_fault_info {
	uint64_t	page_addr;	/* faulting page address */
	uint32_t	vmid;		/* VMID that took the fault */
	uint32_t	mc_id;		/* memory-controller client ID */
	uint32_t	status;		/* raw fault status register value */
	bool		prot_valid;
	bool		prot_read;
	bool		prot_write;
	bool		prot_exec;
};

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private; /* VRAM not visible to the CPU */
	uint64_t local_mem_size_public;	 /* CPU-visible VRAM */
	uint32_t vram_width;
	uint32_t mem_clk_max;		 /* maximum memory clock, for bandwidth reporting */
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

/* Per-queue CU occupancy sample: in-flight wave count keyed by the
 * queue's doorbell offset.
 */
struct kfd_cu_occupancy {
	u32 wave_cnt;
	u32 doorbell_off;
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode we're using the firmware code to
 * schedule the user mode queues and kernel queues such as HIQ and DIQ.
 * the HIQ queue is used as a special queue that dispatches the configuration
 * to the cp and the user mode queues list that are currently running.
 * the DIQ queue is a debugging queue that dispatches debugging commands to the
 * firmware.
 * in this scheduling mode user mode queues over subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over
 * subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
 * set the command processor registers and sets the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};

/* Hardware resources that KGD (amdgpu) hands over to KFD at init time. */
struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);

	/* SDMA doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 */
	uint32_t *sdma_doorbell_idx;

	/* From SOC15 onward, the doorbell index range not usable for CP
	 * queues.
	 */
	uint32_t non_cp_doorbells_start;
	uint32_t non_cp_doorbells_end;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;

	/* True when the MES (micro engine scheduler) is in use. */
	bool enable_mes;
};

/* GPU tiling/addressing configuration reported to user space;
 * the two pointer/count pairs describe register arrays owned by KGD.
 */
struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/**
 * struct kfd2kgd_calls
 *
 * @program_sh_mem_settings: A function that should initiate the memory
 * properties such as main aperture memory type (cache / non cached) and
 * secondary aperture base address, size and memory type.
 * This function is used only for no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
 * scheduling mode. Only used for no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * used only for no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * Array is allocated with kmalloc, needs to be freed with kfree by caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value pairs.
 * Array is allocated with kmalloc, needs to be freed with kfree by caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
 * Only used for no cp scheduling mode
 *
 * @set_vm_context_page_table_base: Program page table base for a VMID
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID
 * NOTE(review): no member of this name exists in the struct below —
 * stale doc entry, confirm against git history before removing.
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
 * NOTE(review): stale doc entry as well, see above.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @get_cu_occupancy: Function pointer that returns to caller the number
 * of wave fronts that are in flight for all of the queues of a process
 * as identified by its pasid. It is important to note that the value
 * returned by this function is a snapshot of current moment and cannot
 * guarantee any minimum for the number of waves in-flight. This function
 * is defined for devices that belong to GFX9 and later GFX families. Care
 * must be taken in calling this function as it is not defined for devices
 * that belong to GFX8 and below GFX families.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to amdkfd driver.
 *
 */
struct kfd2kgd_calls {
	/* Register access functions */
	void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases,
			uint32_t inst);

	int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
			unsigned int vmid, uint32_t inst);

	int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id,
			uint32_t inst);

	int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm, uint32_t inst);

	int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off, uint32_t inst);

	int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);

	int (*hqd_sdma_dump)(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst);

	int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
			   enum kfd_preempt_type reset_type,
			   unsigned int timeout, uint32_t pipe_id,
			   uint32_t queue_id, uint32_t inst);

	bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);

	int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
				unsigned int timeout);

	int (*wave_control_execute)(struct amdgpu_device *adev,
				    uint32_t gfx_index_val,
				    uint32_t sq_cmd, uint32_t inst);
	bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
					uint8_t vmid,
					uint16_t *p_pasid);

	/* No longer needed from GFXv9 onward. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
	void (*set_scratch_backing_va)(struct amdgpu_device *adev,
				uint64_t va, uint32_t vmid);

	void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t page_table_base);
	uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);

	/* Debug-trap / debugger support entry points. */
	uint32_t (*enable_debug_trap)(struct amdgpu_device *adev,
					bool restore_dbg_registers,
					uint32_t vmid);
	uint32_t (*disable_debug_trap)(struct amdgpu_device *adev,
					bool keep_trap_enabled,
					uint32_t vmid);
	int (*validate_trap_override_request)(struct amdgpu_device *adev,
					uint32_t trap_override,
					uint32_t *trap_mask_supported);
	uint32_t (*set_wave_launch_trap_override)(struct amdgpu_device *adev,
					uint32_t vmid,
					uint32_t trap_override,
					uint32_t trap_mask_bits,
					uint32_t trap_mask_request,
					uint32_t *trap_mask_prev,
					uint32_t kfd_dbg_trap_cntl_prev);
	uint32_t (*set_wave_launch_mode)(struct amdgpu_device *adev,
					uint8_t wave_launch_mode,
					uint32_t vmid);
	uint32_t (*set_address_watch)(struct amdgpu_device *adev,
					uint64_t watch_address,
					uint32_t watch_address_mask,
					uint32_t watch_id,
					uint32_t watch_mode,
					uint32_t debug_vmid,
					uint32_t inst);
	uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
					uint32_t watch_id);
	void (*get_iq_wait_times)(struct amdgpu_device *adev,
					uint32_t *wait_times,
					uint32_t inst);
	void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
			uint32_t wait_times,
			uint32_t grace_period,
			uint32_t *reg_offset,
			uint32_t *reg_data);
	void (*get_cu_occupancy)(struct amdgpu_device *adev,
			struct kfd_cu_occupancy *cu_occupancy,
			int *max_waves_per_cu, uint32_t inst);
	void (*program_trap_handler_settings)(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
			uint32_t inst);
	uint64_t (*hqd_get_pq_addr)(struct amdgpu_device *adev,
				    uint32_t pipe_id, uint32_t queue_id,
				    uint32_t inst);
	uint64_t (*hqd_reset)(struct amdgpu_device *adev,
			      uint32_t pipe_id, uint32_t queue_id,
			      uint32_t inst, unsigned int utimeout);
};

#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */