1 /* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/delay.h> 24 #include <linux/kernel.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include "amdgpu.h" 29 #include "amdgpu_gfx.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_smu.h" 32 #include "imu_v12_0.h" 33 #include "soc24.h" 34 #include "nvd.h" 35 36 #include "gc/gc_12_0_0_offset.h" 37 #include "gc/gc_12_0_0_sh_mask.h" 38 #include "soc24_enum.h" 39 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 40 41 #include "soc15.h" 42 #include "clearstate_gfx12.h" 43 #include "v12_structs.h" 44 #include "gfx_v12_0.h" 45 #include "nbif_v6_3_1.h" 46 #include "mes_v12_0.h" 47 48 #define GFX12_NUM_GFX_RINGS 1 49 #define GFX12_MEC_HPD_SIZE 2048 50 51 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 52 53 MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin"); 54 MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin"); 55 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin"); 56 MODULE_FIRMWARE("amdgpu/gc_12_0_0_rlc.bin"); 57 MODULE_FIRMWARE("amdgpu/gc_12_0_0_toc.bin"); 58 MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin"); 59 MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin"); 60 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin"); 61 MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin"); 62 MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin"); 63 64 static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { 65 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), 66 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), 67 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3), 68 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), 69 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), 70 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3), 71 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), 72 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), 73 SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), 74 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), 75 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), 76 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2), 77 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2), 78 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), 79 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), 80 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0), 81 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE), 82 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 83 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR), 84 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE), 85 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR), 86 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR), 87 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 88 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ), 89 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 90 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 91 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 92 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO), 93 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI), 94 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ), 95 SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), 96 SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), 97 SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), 98 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS), 99 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2), 100 SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS), 101 SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), 102 SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES), 103 SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS), 104 SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), 105 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL), 106 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_LO32), 107 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_HI32), 108 SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), 109 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), 110 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL), 111 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR), 112 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR), 113 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR), 114 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), 115 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0), 116 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1), 117 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR), 118 119 /* cp header registers */ 120 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 121 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 122 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 123 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 124 /* SE status registers */ 125 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), 126 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), 127 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2), 128 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3) 129 }; 130 131 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = { 132 /* compute registers */ 133 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID), 134 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE), 135 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY), 136 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY), 137 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM), 138 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE), 139 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI), 140 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR), 141 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), 142 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), 143 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), 144 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL), 145 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR), 146 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI), 147 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR), 148 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL), 149 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST), 150 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR), 151 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI), 152 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL), 153 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR), 154 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR), 155 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS), 156 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO), 157 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI), 158 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL), 159 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET), 160 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE), 161 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET), 162 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE), 163 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE), 164 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR), 165 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM), 166 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), 167 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), 168 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET), 169 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT), 170 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET), 171 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS) 172 }; 173 174 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { 175 /* gfx queue registers */ 176 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE), 177 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID), 178 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY), 179 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM), 180 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE), 181 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI), 182 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET), 183 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL), 184 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR), 185 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR), 186 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI), 187 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST), 188 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED), 189 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL), 190 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0), 191 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0), 192 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR), 193 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI), 194 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO), 195 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI), 196 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 197 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 198 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 199 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 200 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) 201 }; 202 203 static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = { 204 SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), 205 SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), 206 SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) 207 }; 208 209 static const struct soc15_reg_golden golden_settings_gc_12_0[] = { 210 SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000), 211 }; 212 213 #define DEFAULT_SH_MEM_CONFIG \ 214 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 215 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ 216 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) 217 218 static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev); 219 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev); 220 static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev); 221 static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev); 222 static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev); 223 static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev); 224 static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev, 225 struct amdgpu_cu_info *cu_info); 226 static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev); 227 static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 228 u32 sh_num, u32 instance, int xcc_id); 229 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 230 231 static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 232 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 233 uint32_t val); 234 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); 235 static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 236 uint16_t pasid, uint32_t flush_type, 237 bool all_hub, uint8_t dst_sel); 238 static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id); 239 static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); 240 static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev, 241 bool enable); 242 243 static void gfx_v12_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, 244 uint64_t queue_mask) 245 { 246 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 247 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 248 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 249 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 250 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 251 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 252 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 253 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 254 amdgpu_ring_write(kiq_ring, 0); 255 } 256 257 static void gfx_v12_0_kiq_map_queues(struct amdgpu_ring *kiq_ring, 258 struct amdgpu_ring *ring) 259 { 260 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 261 uint64_t wptr_addr = ring->wptr_gpu_addr; 262 uint32_t me = 0, eng_sel = 0; 263 264 switch (ring->funcs->type) { 265 case AMDGPU_RING_TYPE_COMPUTE: 266 me = 1; 267 eng_sel = 0; 268 break; 269 case AMDGPU_RING_TYPE_GFX: 270 me = 0; 271 eng_sel = 4; 272 break; 273 case AMDGPU_RING_TYPE_MES: 274 me = 2; 275 eng_sel = 5; 276 break; 277 default: 278 WARN_ON(1); 279 } 280 281 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 282 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 283 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 284 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 285 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 286 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 287 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 288 PACKET3_MAP_QUEUES_ME((me)) | 289 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 290 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 291 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 292 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 293 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 294 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 295 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 296 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 297 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 298 } 299 300 static void gfx_v12_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 301 struct amdgpu_ring *ring, 302 enum amdgpu_unmap_queues_action action, 303 u64 gpu_addr, u64 seq) 304 { 305 struct amdgpu_device *adev = kiq_ring->adev; 306 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 307 308 if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { 309 amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); 310 return; 311 } 312 313 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 314 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 315 PACKET3_UNMAP_QUEUES_ACTION(action) | 316 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 317 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | 318 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 319 amdgpu_ring_write(kiq_ring, 320 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 321 322 if (action == PREEMPT_QUEUES_NO_UNMAP) { 323 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); 324 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); 325 amdgpu_ring_write(kiq_ring, seq); 326 } else { 327 amdgpu_ring_write(kiq_ring, 0); 328 amdgpu_ring_write(kiq_ring, 0); 329 amdgpu_ring_write(kiq_ring, 0); 330 } 331 } 332 333 static void gfx_v12_0_kiq_query_status(struct amdgpu_ring *kiq_ring, 334 struct amdgpu_ring *ring, 335 u64 addr, u64 seq) 336 { 337 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 338 339 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); 340 amdgpu_ring_write(kiq_ring, 341 PACKET3_QUERY_STATUS_CONTEXT_ID(0) | 342 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | 343 PACKET3_QUERY_STATUS_COMMAND(2)); 344 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 345 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | 346 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); 347 amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); 348 amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); 349 amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); 350 amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); 351 } 352 353 static void gfx_v12_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, 354 uint16_t pasid, 355 uint32_t flush_type, 356 bool all_hub) 357 { 358 gfx_v12_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1); 359 } 360 361 static const struct kiq_pm4_funcs gfx_v12_0_kiq_pm4_funcs = { 362 .kiq_set_resources = gfx_v12_0_kiq_set_resources, 363 .kiq_map_queues = gfx_v12_0_kiq_map_queues, 364 .kiq_unmap_queues = gfx_v12_0_kiq_unmap_queues, 365 .kiq_query_status = gfx_v12_0_kiq_query_status, 366 .kiq_invalidate_tlbs = gfx_v12_0_kiq_invalidate_tlbs, 367 .set_resources_size = 8, 368 .map_queues_size = 7, 369 .unmap_queues_size = 6, 370 .query_status_size = 7, 371 .invalidate_tlbs_size = 2, 372 }; 373 374 static void gfx_v12_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) 375 { 376 adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs; 377 } 378 379 static void gfx_v12_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 380 int mem_space, int opt, uint32_t addr0, 381 uint32_t addr1, uint32_t ref, 382 uint32_t mask, uint32_t inv) 383 { 384 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 385 amdgpu_ring_write(ring, 386 /* memory (1) or register (0) */ 387 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 388 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 389 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 390 WAIT_REG_MEM_ENGINE(eng_sel))); 391 392 if (mem_space) 393 BUG_ON(addr0 & 0x3); /* Dword align */ 394 amdgpu_ring_write(ring, addr0); 395 amdgpu_ring_write(ring, addr1); 396 amdgpu_ring_write(ring, ref); 397 amdgpu_ring_write(ring, mask); 398 amdgpu_ring_write(ring, inv); /* poll interval */ 399 } 400 401 static int gfx_v12_0_ring_test_ring(struct amdgpu_ring *ring) 402 { 403 struct amdgpu_device *adev = ring->adev; 404 uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 405 uint32_t tmp = 0; 406 unsigned i; 407 int r; 408 409 WREG32(scratch, 0xCAFEDEAD); 410 r = amdgpu_ring_alloc(ring, 5); 411 if (r) { 412 dev_err(adev->dev, 413 "amdgpu: cp failed to lock ring %d (%d).\n", 414 ring->idx, r); 415 return r; 416 } 417 418 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 419 gfx_v12_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); 420 } else { 421 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 422 amdgpu_ring_write(ring, scratch - 423 PACKET3_SET_UCONFIG_REG_START); 424 amdgpu_ring_write(ring, 0xDEADBEEF); 425 } 426 amdgpu_ring_commit(ring); 427 428 for (i = 0; i < adev->usec_timeout; i++) { 429 tmp = RREG32(scratch); 430 if (tmp == 0xDEADBEEF) 431 break; 432 if (amdgpu_emu_mode == 1) 433 msleep(1); 434 else 435 udelay(1); 436 } 437 438 if (i >= adev->usec_timeout) 439 r = -ETIMEDOUT; 440 return r; 441 } 442 443 static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 444 { 445 struct amdgpu_device *adev = ring->adev; 446 struct amdgpu_ib ib; 447 struct dma_fence *f = NULL; 448 unsigned index; 449 uint64_t gpu_addr; 450 volatile uint32_t *cpu_ptr; 451 long r; 452 453 /* MES KIQ fw hasn't indirect buffer support for now */ 454 if (adev->enable_mes_kiq && 455 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 456 return 0; 457 458 memset(&ib, 0, sizeof(ib)); 459 460 if (ring->is_mes_queue) { 461 uint32_t padding, offset; 462 463 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); 464 padding = amdgpu_mes_ctx_get_offs(ring, 465 AMDGPU_MES_CTX_PADDING_OFFS); 466 467 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 468 ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 469 470 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding); 471 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding); 472 *cpu_ptr = cpu_to_le32(0xCAFEDEAD); 473 } else { 474 r = amdgpu_device_wb_get(adev, &index); 475 if (r) 476 return r; 477 478 gpu_addr = adev->wb.gpu_addr + (index * 4); 479 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 480 cpu_ptr = &adev->wb.wb[index]; 481 482 r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); 483 if (r) { 484 dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r); 485 goto err1; 486 } 487 } 488 489 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 490 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 491 ib.ptr[2] = lower_32_bits(gpu_addr); 492 ib.ptr[3] = upper_32_bits(gpu_addr); 493 ib.ptr[4] = 0xDEADBEEF; 494 ib.length_dw = 5; 495 496 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 497 if (r) 498 goto err2; 499 500 r = dma_fence_wait_timeout(f, false, timeout); 501 if (r == 0) { 502 r = -ETIMEDOUT; 503 goto err2; 504 } else if (r < 0) { 505 goto err2; 506 } 507 508 if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) 509 r = 0; 510 else 511 r = -EINVAL; 512 err2: 513 if (!ring->is_mes_queue) 514 amdgpu_ib_free(&ib, NULL); 515 dma_fence_put(f); 516 err1: 517 if (!ring->is_mes_queue) 518 amdgpu_device_wb_free(adev, index); 519 return r; 520 } 521 522 static void gfx_v12_0_free_microcode(struct amdgpu_device *adev) 523 { 524 amdgpu_ucode_release(&adev->gfx.pfp_fw); 525 amdgpu_ucode_release(&adev->gfx.me_fw); 526 amdgpu_ucode_release(&adev->gfx.rlc_fw); 527 amdgpu_ucode_release(&adev->gfx.mec_fw); 528 529 kfree(adev->gfx.rlc.register_list_format); 530 } 531 532 static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix) 533 { 534 const struct psp_firmware_header_v1_0 *toc_hdr; 535 int err = 0; 536 537 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, 538 AMDGPU_UCODE_REQUIRED, 539 "amdgpu/%s_toc.bin", ucode_prefix); 540 if (err) 541 goto out; 542 543 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 544 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 545 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 546 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 547 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 548 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 549 return 0; 550 out: 551 amdgpu_ucode_release(&adev->psp.toc_fw); 552 return err; 553 } 554 555 static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) 556 { 557 char ucode_prefix[15]; 558 int err; 559 const struct rlc_firmware_header_v2_0 *rlc_hdr; 560 uint16_t version_major; 561 uint16_t version_minor; 562 563 DRM_DEBUG("\n"); 564 565 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 566 567 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, 568 AMDGPU_UCODE_REQUIRED, 569 "amdgpu/%s_pfp.bin", ucode_prefix); 570 if (err) 571 goto out; 572 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP); 573 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK); 574 575 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, 576 AMDGPU_UCODE_REQUIRED, 577 "amdgpu/%s_me.bin", ucode_prefix); 578 if (err) 579 goto out; 580 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME); 581 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); 582 583 if (!amdgpu_sriov_vf(adev)) { 584 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 585 AMDGPU_UCODE_REQUIRED, 586 "amdgpu/%s_rlc.bin", ucode_prefix); 587 if (err) 588 goto out; 589 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 590 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 591 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 592 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); 593 if (err) 594 goto out; 595 } 596 597 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 598 AMDGPU_UCODE_REQUIRED, 599 "amdgpu/%s_mec.bin", ucode_prefix); 600 if (err) 601 goto out; 602 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC); 603 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK); 604 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK); 605 606 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 607 err = gfx_v12_0_init_toc_microcode(adev, ucode_prefix); 608 609 /* only one MEC for gfx 12 */ 610 adev->gfx.mec2_fw = NULL; 611 612 if (adev->gfx.imu.funcs) { 613 if (adev->gfx.imu.funcs->init_microcode) { 614 err = adev->gfx.imu.funcs->init_microcode(adev); 615 if (err) 616 dev_err(adev->dev, "Failed to load imu firmware!\n"); 617 } 618 } 619 620 out: 621 if (err) { 622 amdgpu_ucode_release(&adev->gfx.pfp_fw); 623 amdgpu_ucode_release(&adev->gfx.me_fw); 624 amdgpu_ucode_release(&adev->gfx.rlc_fw); 625 amdgpu_ucode_release(&adev->gfx.mec_fw); 626 } 627 628 return err; 629 } 630 631 static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev) 632 { 633 u32 count = 0; 634 const struct cs_section_def *sect = NULL; 635 const struct cs_extent_def *ext = NULL; 636 637 count += 1; 638 639 for (sect = gfx12_cs_data; sect->section != NULL; ++sect) { 640 if (sect->id == SECT_CONTEXT) { 641 for (ext = sect->section; ext->extent != NULL; ++ext) 642 count += 2 + ext->reg_count; 643 } else 644 return 0; 645 } 646 647 return count; 648 } 649 650 static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, 651 volatile u32 *buffer) 652 { 653 u32 count = 0, clustercount = 0, i; 654 const struct cs_section_def *sect = NULL; 655 const struct cs_extent_def *ext = NULL; 656 657 if (adev->gfx.rlc.cs_data == NULL) 658 return; 659 if (buffer == NULL) 660 return; 661 662 count += 1; 663 664 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 665 if (sect->id == SECT_CONTEXT) { 666 for (ext = sect->section; ext->extent != NULL; ++ext) { 667 clustercount++; 668 buffer[count++] = ext->reg_count; 669 buffer[count++] = ext->reg_index; 670 671 for (i = 0; i < ext->reg_count; i++) 672 buffer[count++] = cpu_to_le32(ext->extent[i]); 673 } 674 } else 675 return; 676 } 677 678 buffer[0] = clustercount; 679 } 680 681 static void gfx_v12_0_rlc_fini(struct amdgpu_device *adev) 682 { 683 /* clear state block */ 684 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 685 &adev->gfx.rlc.clear_state_gpu_addr, 686 (void **)&adev->gfx.rlc.cs_ptr); 687 688 /* jump table block */ 689 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 690 &adev->gfx.rlc.cp_table_gpu_addr, 691 (void **)&adev->gfx.rlc.cp_table_ptr); 692 } 693 694 static void gfx_v12_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 695 { 696 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 697 698 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; 699 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 700 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); 701 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); 702 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3); 703 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL); 704 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX); 705 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0); 706 adev->gfx.rlc.rlcg_reg_access_supported = true; 707 } 708 709 static int gfx_v12_0_rlc_init(struct amdgpu_device *adev) 710 { 711 const struct cs_section_def *cs_data; 712 int r; 713 714 adev->gfx.rlc.cs_data = gfx12_cs_data; 715 716 cs_data = adev->gfx.rlc.cs_data; 717 718 if (cs_data) { 719 /* init clear state block */ 720 r = amdgpu_gfx_rlc_init_csb(adev); 721 if (r) 722 return r; 723 } 724 725 /* init spm vmid with 0xf */ 726 if (adev->gfx.rlc.funcs->update_spm_vmid) 727 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); 728 729 return 0; 730 } 731 732 static void gfx_v12_0_mec_fini(struct amdgpu_device *adev) 733 { 734 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 735 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 736 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); 737 } 738 739 static void gfx_v12_0_me_init(struct amdgpu_device *adev) 740 { 741 bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); 742 743 amdgpu_gfx_graphics_queue_acquire(adev); 744 } 745 746 static int gfx_v12_0_mec_init(struct amdgpu_device *adev) 747 { 748 int r; 749 u32 *hpd; 750 size_t mec_hpd_size; 751 752 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 753 754 /* take ownership of the relevant compute queues */ 755 amdgpu_gfx_compute_queue_acquire(adev); 756 mec_hpd_size = adev->gfx.num_compute_rings * GFX12_MEC_HPD_SIZE; 757 758 if (mec_hpd_size) { 759 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 760 AMDGPU_GEM_DOMAIN_GTT, 761 &adev->gfx.mec.hpd_eop_obj, 762 &adev->gfx.mec.hpd_eop_gpu_addr, 763 (void **)&hpd); 764 if (r) { 765 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 766 gfx_v12_0_mec_fini(adev); 767 return r; 768 } 769 770 memset(hpd, 0, mec_hpd_size); 771 772 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 773 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 774 } 775 776 return 0; 777 } 778 779 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address) 780 { 781 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 782 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 783 (address << SQ_IND_INDEX__INDEX__SHIFT)); 784 return RREG32_SOC15(GC, 0, regSQ_IND_DATA); 785 } 786 787 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, 788 uint32_t thread, uint32_t regno, 789 uint32_t num, uint32_t *out) 790 { 791 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 792 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 793 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 794 (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | 795 (SQ_IND_INDEX__AUTO_INCR_MASK)); 796 while (num--) 797 *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); 798 } 799 800 static void gfx_v12_0_read_wave_data(struct amdgpu_device *adev, 801 uint32_t xcc_id, 802 uint32_t simd, uint32_t wave, 803 uint32_t *dst, int *no_fields) 804 { 805 /* in gfx12 the SIMD_ID is specified as part of the INSTANCE 806 * field when performing a select_se_sh so it should be 807 * zero here */ 808 WARN_ON(simd != 0); 809 810 /* type 4 wave data */ 811 dst[(*no_fields)++] = 4; 812 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS); 813 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO); 814 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI); 815 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO); 816 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI); 817 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1); 818 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2); 819 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC); 820 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC); 821 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS); 822 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); 823 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); 824 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); 825 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); 826 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATE_PRIV); 827 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_PRIV); 828 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_USER); 829 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAP_CTRL); 830 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_ACTIVE); 831 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_VALID_AND_IDLE); 832 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_LO); 833 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_HI); 834 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_SCHED_MODE); 835 } 836 837 static void gfx_v12_0_read_wave_sgprs(struct amdgpu_device *adev, 838 uint32_t xcc_id, uint32_t simd, 839 uint32_t wave, uint32_t start, 840 uint32_t size, uint32_t *dst) 841 { 842 WARN_ON(simd != 0); 843 844 wave_read_regs( 845 adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, 846 dst); 847 } 848 849 static void gfx_v12_0_read_wave_vgprs(struct amdgpu_device *adev, 850 uint32_t xcc_id, uint32_t simd, 851 uint32_t wave, uint32_t thread, 852 uint32_t start, uint32_t size, 853 uint32_t *dst) 854 { 855 wave_read_regs( 856 adev, wave, thread, 857 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 858 } 859 860 static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev, 861 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 862 { 863 soc24_grbm_select(adev, me, pipe, q, vm); 864 } 865 866 static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = { 867 .get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter, 868 .select_se_sh = &gfx_v12_0_select_se_sh, 869 .read_wave_data = &gfx_v12_0_read_wave_data, 870 .read_wave_sgprs = &gfx_v12_0_read_wave_sgprs, 871 .read_wave_vgprs = &gfx_v12_0_read_wave_vgprs, 872 .select_me_pipe_q = &gfx_v12_0_select_me_pipe_q, 873 .update_perfmon_mgcg = &gfx_v12_0_update_perf_clk, 874 }; 875 876 static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev) 877 { 878 879 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 880 case IP_VERSION(12, 0, 0): 881 case IP_VERSION(12, 0, 1): 882 adev->gfx.config.max_hw_contexts = 8; 883 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 884 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 885 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 886 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 887 break; 888 default: 889 BUG(); 890 break; 891 } 892 893 return 0; 894 } 895 896 static int gfx_v12_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, 897 int me, int pipe, int queue) 898 { 899 int r; 900 struct amdgpu_ring *ring; 901 unsigned int irq_type; 902 903 ring = &adev->gfx.gfx_ring[ring_id]; 904 905 ring->me = me; 906 ring->pipe = pipe; 907 ring->queue = queue; 908 909 ring->ring_obj = NULL; 910 ring->use_doorbell = true; 911 912 if (!ring_id) 913 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; 914 else 915 ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; 916 ring->vm_hub = AMDGPU_GFXHUB(0); 917 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); 918 919 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; 920 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 921 AMDGPU_RING_PRIO_DEFAULT, NULL); 922 if (r) 923 return r; 924 return 0; 925 } 926 927 static int gfx_v12_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 928 int mec, int pipe, int queue) 929 { 930 int r; 931 unsigned irq_type; 932 struct amdgpu_ring *ring; 933 unsigned int hw_prio; 934 935 ring = &adev->gfx.compute_ring[ring_id]; 936 937 /* mec0 is me1 */ 938 ring->me = mec + 1; 939 ring->pipe = pipe; 940 ring->queue = queue; 941 942 ring->ring_obj = NULL; 943 ring->use_doorbell = true; 944 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; 945 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 946 + (ring_id * GFX12_MEC_HPD_SIZE); 947 ring->vm_hub = AMDGPU_GFXHUB(0); 948 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 949 950 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 951 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 952 + ring->pipe; 953 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 954 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 955 /* type-2 packets are deprecated on MEC, use type-3 instead */ 956 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 957 hw_prio, NULL); 958 if (r) 959 return r; 960 961 return 0; 962 } 963 964 static struct { 965 SOC24_FIRMWARE_ID id; 966 unsigned int offset; 967 unsigned int size; 968 unsigned int size_x16; 969 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX]; 970 971 #define RLC_TOC_OFFSET_DWUNIT 8 972 #define RLC_SIZE_MULTIPLE 1024 973 #define RLC_TOC_UMF_SIZE_inM 23ULL 974 #define RLC_TOC_FORMAT_API 165ULL 975 976 static void gfx_v12_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) 977 { 978 RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc; 979 980 while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) { 981 rlc_autoload_info[ucode->id].id = ucode->id; 982 rlc_autoload_info[ucode->id].offset = 983 ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4; 984 rlc_autoload_info[ucode->id].size = 985 ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 : 986 ucode->size * 4; 987 ucode++; 988 } 989 } 990 991 static uint32_t gfx_v12_0_calc_toc_total_size(struct amdgpu_device *adev) 992 { 993 uint32_t total_size = 0; 994 SOC24_FIRMWARE_ID id; 995 996 gfx_v12_0_parse_rlc_toc(adev, adev->psp.toc.start_addr); 997 998 for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++) 999 total_size += rlc_autoload_info[id].size; 1000 1001 /* In case the offset in rlc toc ucode is aligned */ 1002 if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset) 1003 total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset + 1004 rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size; 1005 if (total_size < (RLC_TOC_UMF_SIZE_inM << 20)) 1006 total_size = RLC_TOC_UMF_SIZE_inM << 20; 1007 1008 return total_size; 1009 } 1010 1011 static int gfx_v12_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) 1012 { 1013 int r; 1014 uint32_t total_size; 1015 1016 total_size = gfx_v12_0_calc_toc_total_size(adev); 1017 1018 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 1019 AMDGPU_GEM_DOMAIN_VRAM, 1020 &adev->gfx.rlc.rlc_autoload_bo, 1021 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1022 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1023 1024 if (r) { 1025 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 1026 return r; 1027 } 1028 1029 return 0; 1030 } 1031 1032 static void gfx_v12_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 1033 SOC24_FIRMWARE_ID id, 1034 const void *fw_data, 1035 uint32_t fw_size) 1036 { 1037 uint32_t toc_offset; 1038 uint32_t toc_fw_size; 1039 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 1040 1041 if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX) 1042 return; 1043 1044 toc_offset = rlc_autoload_info[id].offset; 1045 toc_fw_size = rlc_autoload_info[id].size; 1046 1047 if (fw_size == 0) 1048 fw_size = toc_fw_size; 1049 1050 if (fw_size > toc_fw_size) 1051 fw_size = toc_fw_size; 1052 1053 memcpy(ptr + toc_offset, fw_data, fw_size); 1054 1055 if (fw_size < toc_fw_size) 1056 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); 1057 } 1058 1059 static void 1060 gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev) 1061 { 1062 void *data; 1063 uint32_t size; 1064 uint32_t *toc_ptr; 1065 1066 data = adev->psp.toc.start_addr; 1067 size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size; 1068 1069 toc_ptr = (uint32_t *)data + size / 4 - 2; 1070 *toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1; 1071 1072 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC, 1073 data, size); 1074 } 1075 1076 static void 1077 gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev) 1078 { 1079 const __le32 *fw_data; 1080 uint32_t fw_size; 1081 const struct gfx_firmware_header_v2_0 *cpv2_hdr; 1082 const struct rlc_firmware_header_v2_0 *rlc_hdr; 1083 const struct rlc_firmware_header_v2_1 *rlcv21_hdr; 1084 const struct rlc_firmware_header_v2_2 *rlcv22_hdr; 1085 uint16_t version_major, version_minor; 1086 1087 /* pfp ucode */ 1088 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1089 adev->gfx.pfp_fw->data; 1090 /* instruction */ 1091 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1092 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1093 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1094 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP, 1095 fw_data, fw_size); 1096 /* data */ 1097 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1098 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1099 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1100 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P0_STACK, 1101 fw_data, fw_size); 1102 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P1_STACK, 1103 fw_data, fw_size); 1104 /* me ucode */ 1105 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1106 adev->gfx.me_fw->data; 1107 /* instruction */ 1108 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1109 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1110 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1111 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME, 1112 fw_data, fw_size); 1113 /* data */ 1114 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1115 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1116 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1117 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P0_STACK, 1118 fw_data, fw_size); 1119 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P1_STACK, 1120 fw_data, fw_size); 1121 /* mec ucode */ 1122 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1123 adev->gfx.mec_fw->data; 1124 /* instruction */ 1125 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1126 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1127 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1128 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC, 1129 fw_data, fw_size); 1130 /* data */ 1131 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1132 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1133 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1134 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK, 1135 fw_data, fw_size); 1136 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK, 1137 fw_data, fw_size); 1138 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK, 1139 fw_data, fw_size); 1140 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK, 1141 fw_data, fw_size); 1142 1143 /* rlc ucode */ 1144 rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 1145 adev->gfx.rlc_fw->data; 1146 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1147 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1148 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1149 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE, 1150 fw_data, fw_size); 1151 1152 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 1153 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 1154 if (version_major == 2) { 1155 if (version_minor >= 1) { 1156 rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; 1157 1158 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1159 le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes)); 1160 fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes); 1161 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH, 1162 fw_data, fw_size); 1163 1164 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1165 le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes)); 1166 fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes); 1167 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM, 1168 fw_data, fw_size); 1169 } 1170 if (version_minor >= 2) { 1171 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1172 1173 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1174 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); 1175 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); 1176 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE, 1177 fw_data, fw_size); 1178 1179 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1180 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); 1181 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); 1182 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT, 1183 fw_data, fw_size); 1184 } 1185 } 1186 } 1187 1188 static void 1189 gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev) 1190 { 1191 const __le32 *fw_data; 1192 uint32_t fw_size; 1193 const struct sdma_firmware_header_v3_0 *sdma_hdr; 1194 1195 sdma_hdr = (const struct sdma_firmware_header_v3_0 *) 1196 adev->sdma.instance[0].fw->data; 1197 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1198 le32_to_cpu(sdma_hdr->ucode_offset_bytes)); 1199 fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes); 1200 1201 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0, 1202 fw_data, fw_size); 1203 } 1204 1205 static void 1206 gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev) 1207 { 1208 const __le32 *fw_data; 1209 unsigned fw_size; 1210 const struct mes_firmware_header_v1_0 *mes_hdr; 1211 int pipe, ucode_id, data_id; 1212 1213 for (pipe = 0; pipe < 2; pipe++) { 1214 if (pipe == 0) { 1215 ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0; 1216 data_id = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK; 1217 } else { 1218 ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1; 1219 data_id = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK; 1220 } 1221 1222 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1223 adev->mes.fw[pipe]->data; 1224 1225 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1226 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1227 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1228 1229 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size); 1230 1231 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1232 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1233 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1234 1235 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size); 1236 } 1237 } 1238 1239 static int gfx_v12_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1240 { 1241 uint32_t rlc_g_offset, rlc_g_size; 1242 uint64_t gpu_addr; 1243 uint32_t data; 1244 1245 /* RLC autoload sequence 2: copy ucode */ 1246 gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(adev); 1247 gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(adev); 1248 gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(adev); 1249 gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(adev); 1250 1251 rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset; 1252 rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size; 1253 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start; 1254 1255 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); 1256 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); 1257 1258 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); 1259 1260 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 1261 /* RLC autoload sequence 3: load IMU fw */ 1262 if (adev->gfx.imu.funcs->load_microcode) 1263 adev->gfx.imu.funcs->load_microcode(adev); 1264 /* RLC autoload sequence 4 init IMU fw */ 1265 if (adev->gfx.imu.funcs->setup_imu) 1266 adev->gfx.imu.funcs->setup_imu(adev); 1267 if (adev->gfx.imu.funcs->start_imu) 1268 adev->gfx.imu.funcs->start_imu(adev); 1269 1270 /* RLC autoload sequence 5 disable gpa mode */ 1271 gfx_v12_0_disable_gpa_mode(adev); 1272 } else { 1273 /* unhalt rlc to start autoload without imu */ 1274 data = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 1275 data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1); 1276 data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 1277 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, data); 1278 WREG32_SOC15(GC, 0, regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK); 1279 } 1280 1281 return 0; 1282 } 1283 1284 static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) 1285 { 1286 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); 1287 uint32_t *ptr; 1288 uint32_t inst; 1289 1290 ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); 1291 if (!ptr) { 1292 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); 1293 adev->gfx.ip_dump_core = NULL; 1294 } else { 1295 adev->gfx.ip_dump_core = ptr; 1296 } 1297 1298 /* Allocate memory for compute queue registers for all the instances */ 1299 reg_count = ARRAY_SIZE(gc_cp_reg_list_12); 1300 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 1301 adev->gfx.mec.num_queue_per_pipe; 1302 1303 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1304 if (!ptr) { 1305 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); 1306 adev->gfx.ip_dump_compute_queues = NULL; 1307 } else { 1308 adev->gfx.ip_dump_compute_queues = ptr; 1309 } 1310 1311 /* Allocate memory for gfx queue registers for all the instances */ 1312 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 1313 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me * 1314 adev->gfx.me.num_queue_per_pipe; 1315 1316 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1317 if (!ptr) { 1318 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); 1319 adev->gfx.ip_dump_gfx_queues = NULL; 1320 } else { 1321 adev->gfx.ip_dump_gfx_queues = ptr; 1322 } 1323 } 1324 1325 static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) 1326 { 1327 int i, j, k, r, ring_id = 0; 1328 unsigned num_compute_rings; 1329 int xcc_id = 0; 1330 struct amdgpu_device *adev = ip_block->adev; 1331 1332 INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler); 1333 1334 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1335 case IP_VERSION(12, 0, 0): 1336 case IP_VERSION(12, 0, 1): 1337 adev->gfx.me.num_me = 1; 1338 adev->gfx.me.num_pipe_per_me = 1; 1339 adev->gfx.me.num_queue_per_pipe = 1; 1340 adev->gfx.mec.num_mec = 2; 1341 adev->gfx.mec.num_pipe_per_mec = 2; 1342 adev->gfx.mec.num_queue_per_pipe = 4; 1343 break; 1344 default: 1345 adev->gfx.me.num_me = 1; 1346 adev->gfx.me.num_pipe_per_me = 1; 1347 adev->gfx.me.num_queue_per_pipe = 1; 1348 adev->gfx.mec.num_mec = 1; 1349 adev->gfx.mec.num_pipe_per_mec = 4; 1350 adev->gfx.mec.num_queue_per_pipe = 8; 1351 break; 1352 } 1353 1354 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1355 case IP_VERSION(12, 0, 0): 1356 case IP_VERSION(12, 0, 1): 1357 if (adev->gfx.me_fw_version >= 2480 && 1358 adev->gfx.pfp_fw_version >= 2530 && 1359 adev->gfx.mec_fw_version >= 2680 && 1360 adev->mes.fw_version[0] >= 100) 1361 adev->gfx.enable_cleaner_shader = true; 1362 break; 1363 default: 1364 adev->gfx.enable_cleaner_shader = false; 1365 break; 1366 } 1367 1368 /* recalculate compute rings to use based on hardware configuration */ 1369 num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * 1370 adev->gfx.mec.num_queue_per_pipe) / 2; 1371 adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, 1372 num_compute_rings); 1373 1374 /* EOP Event */ 1375 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1376 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1377 &adev->gfx.eop_irq); 1378 if (r) 1379 return r; 1380 1381 /* Bad opcode Event */ 1382 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1383 GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, 1384 &adev->gfx.bad_op_irq); 1385 if (r) 1386 return r; 1387 1388 /* Privileged reg */ 1389 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1390 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1391 &adev->gfx.priv_reg_irq); 1392 if (r) 1393 return r; 1394 1395 /* Privileged inst */ 1396 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1397 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1398 &adev->gfx.priv_inst_irq); 1399 if (r) 1400 return r; 1401 1402 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1403 1404 gfx_v12_0_me_init(adev); 1405 1406 r = gfx_v12_0_rlc_init(adev); 1407 if (r) { 1408 dev_err(adev->dev, "Failed to init rlc BOs!\n"); 1409 return r; 1410 } 1411 1412 r = gfx_v12_0_mec_init(adev); 1413 if (r) { 1414 dev_err(adev->dev, "Failed to init MEC BOs!\n"); 1415 return r; 1416 } 1417 1418 /* set up the gfx ring */ 1419 for (i = 0; i < adev->gfx.me.num_me; i++) { 1420 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 1421 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1422 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1423 continue; 1424 1425 r = gfx_v12_0_gfx_ring_init(adev, ring_id, 1426 i, k, j); 1427 if (r) 1428 return r; 1429 ring_id++; 1430 } 1431 } 1432 } 1433 1434 ring_id = 0; 1435 /* set up the compute queues - allocate horizontally across pipes */ 1436 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1437 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1438 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1439 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 1440 0, i, k, j)) 1441 continue; 1442 1443 r = gfx_v12_0_compute_ring_init(adev, ring_id, 1444 i, k, j); 1445 if (r) 1446 return r; 1447 1448 ring_id++; 1449 } 1450 } 1451 } 1452 1453 adev->gfx.gfx_supported_reset = 1454 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 1455 adev->gfx.compute_supported_reset = 1456 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 1457 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1458 case IP_VERSION(12, 0, 0): 1459 case IP_VERSION(12, 0, 1): 1460 if ((adev->gfx.me_fw_version >= 2660) && 1461 (adev->gfx.mec_fw_version >= 2920)) { 1462 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1463 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1464 } 1465 } 1466 1467 if (!adev->enable_mes_kiq) { 1468 r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0); 1469 if (r) { 1470 dev_err(adev->dev, "Failed to init KIQ BOs!\n"); 1471 return r; 1472 } 1473 1474 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1475 if (r) 1476 return r; 1477 } 1478 1479 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_compute_mqd), 0); 1480 if (r) 1481 return r; 1482 1483 /* allocate visible FB for rlc auto-loading fw */ 1484 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1485 r = gfx_v12_0_rlc_autoload_buffer_init(adev); 1486 if (r) 1487 return r; 1488 } 1489 1490 r = gfx_v12_0_gpu_early_init(adev); 1491 if (r) 1492 return r; 1493 1494 gfx_v12_0_alloc_ip_dump(adev); 1495 1496 r = amdgpu_gfx_sysfs_init(adev); 1497 if (r) 1498 return r; 1499 1500 return 0; 1501 } 1502 1503 static void gfx_v12_0_pfp_fini(struct amdgpu_device *adev) 1504 { 1505 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1506 &adev->gfx.pfp.pfp_fw_gpu_addr, 1507 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1508 1509 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1510 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1511 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1512 } 1513 1514 static void gfx_v12_0_me_fini(struct amdgpu_device *adev) 1515 { 1516 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1517 &adev->gfx.me.me_fw_gpu_addr, 1518 (void **)&adev->gfx.me.me_fw_ptr); 1519 1520 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1521 &adev->gfx.me.me_fw_data_gpu_addr, 1522 (void **)&adev->gfx.me.me_fw_data_ptr); 1523 } 1524 1525 static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1526 { 1527 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1528 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1529 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1530 } 1531 1532 static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) 1533 { 1534 int i; 1535 struct amdgpu_device *adev = ip_block->adev; 1536 1537 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1538 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1539 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1540 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1541 1542 amdgpu_gfx_mqd_sw_fini(adev, 0); 1543 1544 if (!adev->enable_mes_kiq) { 1545 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); 1546 amdgpu_gfx_kiq_fini(adev, 0); 1547 } 1548 1549 gfx_v12_0_pfp_fini(adev); 1550 gfx_v12_0_me_fini(adev); 1551 gfx_v12_0_rlc_fini(adev); 1552 gfx_v12_0_mec_fini(adev); 1553 1554 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1555 gfx_v12_0_rlc_autoload_buffer_fini(adev); 1556 1557 gfx_v12_0_free_microcode(adev); 1558 1559 amdgpu_gfx_sysfs_fini(adev); 1560 1561 kfree(adev->gfx.ip_dump_core); 1562 kfree(adev->gfx.ip_dump_compute_queues); 1563 kfree(adev->gfx.ip_dump_gfx_queues); 1564 1565 return 0; 1566 } 1567 1568 static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1569 u32 sh_num, u32 instance, int xcc_id) 1570 { 1571 u32 data; 1572 1573 if (instance == 0xffffffff) 1574 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1575 INSTANCE_BROADCAST_WRITES, 1); 1576 else 1577 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1578 instance); 1579 1580 if (se_num == 0xffffffff) 1581 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1582 1); 1583 else 1584 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1585 1586 if (sh_num == 0xffffffff) 1587 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1588 1); 1589 else 1590 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1591 1592 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1593 } 1594 1595 static u32 gfx_v12_0_get_sa_active_bitmap(struct amdgpu_device *adev) 1596 { 1597 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask; 1598 1599 gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_CC_GC_SA_UNIT_DISABLE); 1600 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask, 1601 GRBM_CC_GC_SA_UNIT_DISABLE, 1602 SA_DISABLE); 1603 gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_GC_USER_SA_UNIT_DISABLE); 1604 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask, 1605 GRBM_GC_USER_SA_UNIT_DISABLE, 1606 SA_DISABLE); 1607 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * 1608 adev->gfx.config.max_shader_engines); 1609 1610 return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask)); 1611 } 1612 1613 static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1614 { 1615 u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask; 1616 u32 rb_mask; 1617 1618 gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1619 gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask, 1620 CC_RB_BACKEND_DISABLE, 1621 BACKEND_DISABLE); 1622 gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 1623 gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask, 1624 GC_USER_RB_BACKEND_DISABLE, 1625 BACKEND_DISABLE); 1626 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se * 1627 adev->gfx.config.max_shader_engines); 1628 1629 return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask)); 1630 } 1631 1632 static void gfx_v12_0_setup_rb(struct amdgpu_device *adev) 1633 { 1634 u32 rb_bitmap_per_sa; 1635 u32 rb_bitmap_width_per_sa; 1636 u32 max_sa; 1637 u32 active_sa_bitmap; 1638 u32 global_active_rb_bitmap; 1639 u32 active_rb_bitmap = 0; 1640 u32 i; 1641 1642 /* query sa bitmap from SA_UNIT_DISABLE registers */ 1643 active_sa_bitmap = gfx_v12_0_get_sa_active_bitmap(adev); 1644 /* query rb bitmap from RB_BACKEND_DISABLE registers */ 1645 global_active_rb_bitmap = gfx_v12_0_get_rb_active_bitmap(adev); 1646 1647 /* generate active rb bitmap according to active sa bitmap */ 1648 max_sa = adev->gfx.config.max_shader_engines * 1649 adev->gfx.config.max_sh_per_se; 1650 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se / 1651 adev->gfx.config.max_sh_per_se; 1652 rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa); 1653 1654 for (i = 0; i < max_sa; i++) { 1655 if (active_sa_bitmap & (1 << i)) 1656 active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa)); 1657 } 1658 1659 active_rb_bitmap &= global_active_rb_bitmap; 1660 adev->gfx.config.backend_enable_mask = active_rb_bitmap; 1661 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); 1662 } 1663 1664 #define LDS_APP_BASE 0x1 1665 #define SCRATCH_APP_BASE 0x2 1666 1667 static void gfx_v12_0_init_compute_vmid(struct amdgpu_device *adev) 1668 { 1669 int i; 1670 uint32_t sh_mem_bases; 1671 uint32_t data; 1672 1673 /* 1674 * Configure apertures: 1675 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1676 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1677 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1678 */ 1679 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | 1680 SCRATCH_APP_BASE; 1681 1682 mutex_lock(&adev->srbm_mutex); 1683 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1684 soc24_grbm_select(adev, 0, 0, 0, i); 1685 /* CP and shaders */ 1686 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1687 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); 1688 1689 /* Enable trap for each kfd vmid. */ 1690 data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); 1691 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 1692 WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); 1693 } 1694 soc24_grbm_select(adev, 0, 0, 0, 0); 1695 mutex_unlock(&adev->srbm_mutex); 1696 } 1697 1698 static void gfx_v12_0_tcp_harvest(struct amdgpu_device *adev) 1699 { 1700 /* TODO: harvest feature to be added later. */ 1701 } 1702 1703 static void gfx_v12_0_get_tcc_info(struct amdgpu_device *adev) 1704 { 1705 } 1706 1707 static void gfx_v12_0_constants_init(struct amdgpu_device *adev) 1708 { 1709 u32 tmp; 1710 int i; 1711 1712 if (!amdgpu_sriov_vf(adev)) 1713 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 1714 1715 gfx_v12_0_setup_rb(adev); 1716 gfx_v12_0_get_cu_info(adev, &adev->gfx.cu_info); 1717 gfx_v12_0_get_tcc_info(adev); 1718 adev->gfx.config.pa_sc_tile_steering_override = 0; 1719 1720 /* XXX SH_MEM regs */ 1721 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1722 mutex_lock(&adev->srbm_mutex); 1723 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { 1724 soc24_grbm_select(adev, 0, 0, 0, i); 1725 /* CP and shaders */ 1726 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1727 if (i != 0) { 1728 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1729 (adev->gmc.private_aperture_start >> 48)); 1730 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1731 (adev->gmc.shared_aperture_start >> 48)); 1732 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); 1733 } 1734 } 1735 soc24_grbm_select(adev, 0, 0, 0, 0); 1736 1737 mutex_unlock(&adev->srbm_mutex); 1738 1739 gfx_v12_0_init_compute_vmid(adev); 1740 } 1741 1742 static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev, 1743 int me, int pipe) 1744 { 1745 if (me != 0) 1746 return 0; 1747 1748 switch (pipe) { 1749 case 0: 1750 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 1751 default: 1752 return 0; 1753 } 1754 } 1755 1756 static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev, 1757 int me, int pipe) 1758 { 1759 /* 1760 * amdgpu controls only the first MEC. That's why this function only 1761 * handles the setting of interrupts for this specific MEC. All other 1762 * pipes' interrupts are set by amdkfd. 1763 */ 1764 if (me != 1) 1765 return 0; 1766 1767 switch (pipe) { 1768 case 0: 1769 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 1770 case 1: 1771 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 1772 default: 1773 return 0; 1774 } 1775 } 1776 1777 static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 1778 bool enable) 1779 { 1780 u32 tmp, cp_int_cntl_reg; 1781 int i, j; 1782 1783 if (amdgpu_sriov_vf(adev)) 1784 return; 1785 1786 for (i = 0; i < adev->gfx.me.num_me; i++) { 1787 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 1788 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 1789 1790 if (cp_int_cntl_reg) { 1791 tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 1792 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1793 enable ? 1 : 0); 1794 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1795 enable ? 1 : 0); 1796 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1797 enable ? 1 : 0); 1798 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1799 enable ? 1 : 0); 1800 WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); 1801 } 1802 } 1803 } 1804 } 1805 1806 static int gfx_v12_0_init_csb(struct amdgpu_device *adev) 1807 { 1808 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 1809 1810 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 1811 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1812 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 1813 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1814 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 1815 1816 return 0; 1817 } 1818 1819 static void gfx_v12_0_rlc_stop(struct amdgpu_device *adev) 1820 { 1821 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 1822 1823 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 1824 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 1825 } 1826 1827 static void gfx_v12_0_rlc_reset(struct amdgpu_device *adev) 1828 { 1829 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 1830 udelay(50); 1831 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 1832 udelay(50); 1833 } 1834 1835 static void gfx_v12_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 1836 bool enable) 1837 { 1838 uint32_t rlc_pg_cntl; 1839 1840 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 1841 1842 if (!enable) { 1843 /* RLC_PG_CNTL[23] = 0 (default) 1844 * RLC will wait for handshake acks with SMU 1845 * GFXOFF will be enabled 1846 * RLC_PG_CNTL[23] = 1 1847 * RLC will not issue any message to SMU 1848 * hence no handshake between SMU & RLC 1849 * GFXOFF will be disabled 1850 */ 1851 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1852 } else 1853 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1854 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 1855 } 1856 1857 static void gfx_v12_0_rlc_start(struct amdgpu_device *adev) 1858 { 1859 /* TODO: enable rlc & smu handshake until smu 1860 * and gfxoff feature works as expected */ 1861 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 1862 gfx_v12_0_rlc_smu_handshake_cntl(adev, false); 1863 1864 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 1865 udelay(50); 1866 } 1867 1868 static void gfx_v12_0_rlc_enable_srm(struct amdgpu_device *adev) 1869 { 1870 uint32_t tmp; 1871 1872 /* enable Save Restore Machine */ 1873 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 1874 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 1875 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 1876 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 1877 } 1878 1879 static void gfx_v12_0_load_rlcg_microcode(struct amdgpu_device *adev) 1880 { 1881 const struct rlc_firmware_header_v2_0 *hdr; 1882 const __le32 *fw_data; 1883 unsigned i, fw_size; 1884 1885 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1886 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1887 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1888 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 1889 1890 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 1891 RLCG_UCODE_LOADING_START_ADDRESS); 1892 1893 for (i = 0; i < fw_size; i++) 1894 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 1895 le32_to_cpup(fw_data++)); 1896 1897 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 1898 } 1899 1900 static void gfx_v12_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 1901 { 1902 const struct rlc_firmware_header_v2_2 *hdr; 1903 const __le32 *fw_data; 1904 unsigned i, fw_size; 1905 u32 tmp; 1906 1907 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1908 1909 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1910 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 1911 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 1912 1913 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 1914 1915 for (i = 0; i < fw_size; i++) { 1916 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1917 msleep(1); 1918 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 1919 le32_to_cpup(fw_data++)); 1920 } 1921 1922 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1923 1924 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1925 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 1926 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 1927 1928 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 1929 for (i = 0; i < fw_size; i++) { 1930 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1931 msleep(1); 1932 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 1933 le32_to_cpup(fw_data++)); 1934 } 1935 1936 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1937 1938 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 1939 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 1940 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 1941 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 1942 } 1943 1944 static int gfx_v12_0_rlc_load_microcode(struct amdgpu_device *adev) 1945 { 1946 const struct rlc_firmware_header_v2_0 *hdr; 1947 uint16_t version_major; 1948 uint16_t version_minor; 1949 1950 if (!adev->gfx.rlc_fw) 1951 return -EINVAL; 1952 1953 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1954 amdgpu_ucode_print_rlc_hdr(&hdr->header); 1955 1956 version_major = le16_to_cpu(hdr->header.header_version_major); 1957 version_minor = le16_to_cpu(hdr->header.header_version_minor); 1958 1959 if (version_major == 2) { 1960 gfx_v12_0_load_rlcg_microcode(adev); 1961 if (amdgpu_dpm == 1) { 1962 if (version_minor >= 2) 1963 gfx_v12_0_load_rlc_iram_dram_microcode(adev); 1964 } 1965 1966 return 0; 1967 } 1968 1969 return -EINVAL; 1970 } 1971 1972 static int gfx_v12_0_rlc_resume(struct amdgpu_device *adev) 1973 { 1974 int r; 1975 1976 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1977 gfx_v12_0_init_csb(adev); 1978 1979 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 1980 gfx_v12_0_rlc_enable_srm(adev); 1981 } else { 1982 if (amdgpu_sriov_vf(adev)) { 1983 gfx_v12_0_init_csb(adev); 1984 return 0; 1985 } 1986 1987 adev->gfx.rlc.funcs->stop(adev); 1988 1989 /* disable CG */ 1990 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 1991 1992 /* disable PG */ 1993 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 1994 1995 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 1996 /* legacy rlc firmware loading */ 1997 r = gfx_v12_0_rlc_load_microcode(adev); 1998 if (r) 1999 return r; 2000 } 2001 2002 gfx_v12_0_init_csb(adev); 2003 2004 adev->gfx.rlc.funcs->start(adev); 2005 } 2006 2007 return 0; 2008 } 2009 2010 static void gfx_v12_0_config_gfx_rs64(struct amdgpu_device *adev) 2011 { 2012 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2013 const struct gfx_firmware_header_v2_0 *me_hdr; 2014 const struct gfx_firmware_header_v2_0 *mec_hdr; 2015 uint32_t pipe_id, tmp; 2016 2017 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2018 adev->gfx.mec_fw->data; 2019 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2020 adev->gfx.me_fw->data; 2021 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2022 adev->gfx.pfp_fw->data; 2023 2024 /* config pfp program start addr */ 2025 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2026 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2027 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2028 (pfp_hdr->ucode_start_addr_hi << 30) | 2029 (pfp_hdr->ucode_start_addr_lo >> 2)); 2030 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2031 pfp_hdr->ucode_start_addr_hi >> 2); 2032 } 2033 soc24_grbm_select(adev, 0, 0, 0, 0); 2034 2035 /* reset pfp pipe */ 2036 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2037 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2038 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2039 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2040 2041 /* clear pfp pipe reset */ 2042 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2043 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2044 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2045 2046 /* config me program start addr */ 2047 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2048 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2049 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2050 (me_hdr->ucode_start_addr_hi << 30) | 2051 (me_hdr->ucode_start_addr_lo >> 2)); 2052 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2053 me_hdr->ucode_start_addr_hi>>2); 2054 } 2055 soc24_grbm_select(adev, 0, 0, 0, 0); 2056 2057 /* reset me pipe */ 2058 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2059 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2060 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2061 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2062 2063 /* clear me pipe reset */ 2064 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2065 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2066 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2067 2068 /* config mec program start addr */ 2069 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2070 soc24_grbm_select(adev, 1, pipe_id, 0, 0); 2071 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2072 mec_hdr->ucode_start_addr_lo >> 2 | 2073 mec_hdr->ucode_start_addr_hi << 30); 2074 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2075 mec_hdr->ucode_start_addr_hi >> 2); 2076 } 2077 soc24_grbm_select(adev, 0, 0, 0, 0); 2078 2079 /* reset mec pipe */ 2080 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2081 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 2082 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 2083 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 2084 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 2085 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2086 2087 /* clear mec pipe reset */ 2088 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 2089 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 2090 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 2091 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 2092 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2093 } 2094 2095 static void gfx_v12_0_set_pfp_ucode_start_addr(struct amdgpu_device *adev) 2096 { 2097 const struct gfx_firmware_header_v2_0 *cp_hdr; 2098 unsigned pipe_id, tmp; 2099 2100 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 2101 adev->gfx.pfp_fw->data; 2102 mutex_lock(&adev->srbm_mutex); 2103 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2104 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2105 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2106 (cp_hdr->ucode_start_addr_hi << 30) | 2107 (cp_hdr->ucode_start_addr_lo >> 2)); 2108 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2109 cp_hdr->ucode_start_addr_hi>>2); 2110 2111 /* 2112 * Program CP_ME_CNTL to reset given PIPE to take 2113 * effect of CP_PFP_PRGRM_CNTR_START. 2114 */ 2115 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2116 if (pipe_id == 0) 2117 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2118 PFP_PIPE0_RESET, 1); 2119 else 2120 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2121 PFP_PIPE1_RESET, 1); 2122 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2123 2124 /* Clear pfp pipe0 reset bit. */ 2125 if (pipe_id == 0) 2126 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2127 PFP_PIPE0_RESET, 0); 2128 else 2129 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2130 PFP_PIPE1_RESET, 0); 2131 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2132 } 2133 soc24_grbm_select(adev, 0, 0, 0, 0); 2134 mutex_unlock(&adev->srbm_mutex); 2135 } 2136 2137 static void gfx_v12_0_set_me_ucode_start_addr(struct amdgpu_device *adev) 2138 { 2139 const struct gfx_firmware_header_v2_0 *cp_hdr; 2140 unsigned pipe_id, tmp; 2141 2142 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 2143 adev->gfx.me_fw->data; 2144 mutex_lock(&adev->srbm_mutex); 2145 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2146 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2147 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2148 (cp_hdr->ucode_start_addr_hi << 30) | 2149 (cp_hdr->ucode_start_addr_lo >> 2) ); 2150 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2151 cp_hdr->ucode_start_addr_hi>>2); 2152 2153 /* 2154 * Program CP_ME_CNTL to reset given PIPE to take 2155 * effect of CP_ME_PRGRM_CNTR_START. 2156 */ 2157 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2158 if (pipe_id == 0) 2159 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2160 ME_PIPE0_RESET, 1); 2161 else 2162 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2163 ME_PIPE1_RESET, 1); 2164 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2165 2166 /* Clear pfp pipe0 reset bit. */ 2167 if (pipe_id == 0) 2168 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2169 ME_PIPE0_RESET, 0); 2170 else 2171 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2172 ME_PIPE1_RESET, 0); 2173 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2174 } 2175 soc24_grbm_select(adev, 0, 0, 0, 0); 2176 mutex_unlock(&adev->srbm_mutex); 2177 } 2178 2179 static void gfx_v12_0_set_mec_ucode_start_addr(struct amdgpu_device *adev) 2180 { 2181 const struct gfx_firmware_header_v2_0 *cp_hdr; 2182 unsigned pipe_id; 2183 2184 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 2185 adev->gfx.mec_fw->data; 2186 mutex_lock(&adev->srbm_mutex); 2187 for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) { 2188 soc24_grbm_select(adev, 1, pipe_id, 0, 0); 2189 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2190 cp_hdr->ucode_start_addr_lo >> 2 | 2191 cp_hdr->ucode_start_addr_hi << 30); 2192 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2193 cp_hdr->ucode_start_addr_hi >> 2); 2194 } 2195 soc24_grbm_select(adev, 0, 0, 0, 0); 2196 mutex_unlock(&adev->srbm_mutex); 2197 } 2198 2199 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 2200 { 2201 uint32_t cp_status; 2202 uint32_t bootload_status; 2203 int i; 2204 2205 for (i = 0; i < adev->usec_timeout; i++) { 2206 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2207 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 2208 2209 if ((cp_status == 0) && 2210 (REG_GET_FIELD(bootload_status, 2211 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 2212 break; 2213 } 2214 udelay(1); 2215 if (amdgpu_emu_mode) 2216 msleep(10); 2217 } 2218 2219 if (i >= adev->usec_timeout) { 2220 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 2221 return -ETIMEDOUT; 2222 } 2223 2224 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2225 gfx_v12_0_set_pfp_ucode_start_addr(adev); 2226 gfx_v12_0_set_me_ucode_start_addr(adev); 2227 gfx_v12_0_set_mec_ucode_start_addr(adev); 2228 } 2229 2230 return 0; 2231 } 2232 2233 static int gfx_v12_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2234 { 2235 int i; 2236 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2237 2238 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2239 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2240 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2241 2242 for (i = 0; i < adev->usec_timeout; i++) { 2243 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 2244 break; 2245 udelay(1); 2246 } 2247 2248 if (i >= adev->usec_timeout) 2249 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); 2250 2251 return 0; 2252 } 2253 2254 static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 2255 { 2256 int r; 2257 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2258 const __le32 *fw_ucode, *fw_data; 2259 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2260 uint32_t tmp; 2261 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2262 2263 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2264 adev->gfx.pfp_fw->data; 2265 2266 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2267 2268 /* instruction */ 2269 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 2270 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 2271 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 2272 /* data */ 2273 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2274 le32_to_cpu(pfp_hdr->data_offset_bytes)); 2275 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 2276 2277 /* 64kb align */ 2278 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2279 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2280 &adev->gfx.pfp.pfp_fw_obj, 2281 &adev->gfx.pfp.pfp_fw_gpu_addr, 2282 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2283 if (r) { 2284 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 2285 gfx_v12_0_pfp_fini(adev); 2286 return r; 2287 } 2288 2289 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2290 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2291 &adev->gfx.pfp.pfp_fw_data_obj, 2292 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 2293 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 2294 if (r) { 2295 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 2296 gfx_v12_0_pfp_fini(adev); 2297 return r; 2298 } 2299 2300 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 2301 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 2302 2303 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2304 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 2305 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2306 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 2307 2308 if (amdgpu_emu_mode == 1) 2309 adev->hdp.funcs->flush_hdp(adev, NULL); 2310 2311 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2312 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2313 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2314 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2315 2316 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2317 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2318 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2319 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2320 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2321 2322 /* 2323 * Programming any of the CP_PFP_IC_BASE registers 2324 * forces invalidation of the ME L1 I$. Wait for the 2325 * invalidation complete 2326 */ 2327 for (i = 0; i < usec_timeout; i++) { 2328 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2329 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2330 INVALIDATE_CACHE_COMPLETE)) 2331 break; 2332 udelay(1); 2333 } 2334 2335 if (i >= usec_timeout) { 2336 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2337 return -EINVAL; 2338 } 2339 2340 /* Prime the L1 instruction caches */ 2341 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2342 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2343 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2344 /* Waiting for cache primed*/ 2345 for (i = 0; i < usec_timeout; i++) { 2346 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2347 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2348 ICACHE_PRIMED)) 2349 break; 2350 udelay(1); 2351 } 2352 2353 if (i >= usec_timeout) { 2354 dev_err(adev->dev, "failed to prime instruction cache\n"); 2355 return -EINVAL; 2356 } 2357 2358 mutex_lock(&adev->srbm_mutex); 2359 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2360 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2361 2362 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2363 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 2364 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2365 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 2366 } 2367 soc24_grbm_select(adev, 0, 0, 0, 0); 2368 mutex_unlock(&adev->srbm_mutex); 2369 2370 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2371 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2372 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2373 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2374 2375 /* Invalidate the data caches */ 2376 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2377 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2378 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2379 2380 for (i = 0; i < usec_timeout; i++) { 2381 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2382 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2383 INVALIDATE_DCACHE_COMPLETE)) 2384 break; 2385 udelay(1); 2386 } 2387 2388 if (i >= usec_timeout) { 2389 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2390 return -EINVAL; 2391 } 2392 2393 gfx_v12_0_set_pfp_ucode_start_addr(adev); 2394 2395 return 0; 2396 } 2397 2398 static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 2399 { 2400 int r; 2401 const struct gfx_firmware_header_v2_0 *me_hdr; 2402 const __le32 *fw_ucode, *fw_data; 2403 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2404 uint32_t tmp; 2405 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2406 2407 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2408 adev->gfx.me_fw->data; 2409 2410 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2411 2412 /* instruction */ 2413 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 2414 le32_to_cpu(me_hdr->ucode_offset_bytes)); 2415 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 2416 /* data */ 2417 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 2418 le32_to_cpu(me_hdr->data_offset_bytes)); 2419 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 2420 2421 /* 64kb align*/ 2422 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2423 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2424 &adev->gfx.me.me_fw_obj, 2425 &adev->gfx.me.me_fw_gpu_addr, 2426 (void **)&adev->gfx.me.me_fw_ptr); 2427 if (r) { 2428 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 2429 gfx_v12_0_me_fini(adev); 2430 return r; 2431 } 2432 2433 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2434 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2435 &adev->gfx.me.me_fw_data_obj, 2436 &adev->gfx.me.me_fw_data_gpu_addr, 2437 (void **)&adev->gfx.me.me_fw_data_ptr); 2438 if (r) { 2439 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 2440 gfx_v12_0_pfp_fini(adev); 2441 return r; 2442 } 2443 2444 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 2445 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 2446 2447 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 2448 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 2449 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 2450 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 2451 2452 if (amdgpu_emu_mode == 1) 2453 adev->hdp.funcs->flush_hdp(adev, NULL); 2454 2455 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2456 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2457 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2458 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2459 2460 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2461 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2462 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2463 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2464 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2465 2466 /* 2467 * Programming any of the CP_ME_IC_BASE registers 2468 * forces invalidation of the ME L1 I$. Wait for the 2469 * invalidation complete 2470 */ 2471 for (i = 0; i < usec_timeout; i++) { 2472 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2473 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2474 INVALIDATE_CACHE_COMPLETE)) 2475 break; 2476 udelay(1); 2477 } 2478 2479 if (i >= usec_timeout) { 2480 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2481 return -EINVAL; 2482 } 2483 2484 /* Prime the instruction caches */ 2485 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2486 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2487 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2488 2489 /* Waiting for instruction cache primed*/ 2490 for (i = 0; i < usec_timeout; i++) { 2491 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2492 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2493 ICACHE_PRIMED)) 2494 break; 2495 udelay(1); 2496 } 2497 2498 if (i >= usec_timeout) { 2499 dev_err(adev->dev, "failed to prime instruction cache\n"); 2500 return -EINVAL; 2501 } 2502 2503 mutex_lock(&adev->srbm_mutex); 2504 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2505 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2506 2507 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2508 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 2509 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2510 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 2511 } 2512 soc24_grbm_select(adev, 0, 0, 0, 0); 2513 mutex_unlock(&adev->srbm_mutex); 2514 2515 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2516 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2517 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2518 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2519 2520 /* Invalidate the data caches */ 2521 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2522 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2523 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2524 2525 for (i = 0; i < usec_timeout; i++) { 2526 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2527 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2528 INVALIDATE_DCACHE_COMPLETE)) 2529 break; 2530 udelay(1); 2531 } 2532 2533 if (i >= usec_timeout) { 2534 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2535 return -EINVAL; 2536 } 2537 2538 gfx_v12_0_set_me_ucode_start_addr(adev); 2539 2540 return 0; 2541 } 2542 2543 static int gfx_v12_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2544 { 2545 int r; 2546 2547 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 2548 return -EINVAL; 2549 2550 gfx_v12_0_cp_gfx_enable(adev, false); 2551 2552 r = gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(adev); 2553 if (r) { 2554 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 2555 return r; 2556 } 2557 2558 r = gfx_v12_0_cp_gfx_load_me_microcode_rs64(adev); 2559 if (r) { 2560 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 2561 return r; 2562 } 2563 2564 return 0; 2565 } 2566 2567 static int gfx_v12_0_cp_gfx_start(struct amdgpu_device *adev) 2568 { 2569 /* init the CP */ 2570 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 2571 adev->gfx.config.max_hw_contexts - 1); 2572 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 2573 2574 if (!amdgpu_async_gfx_ring) 2575 gfx_v12_0_cp_gfx_enable(adev, true); 2576 2577 return 0; 2578 } 2579 2580 static void gfx_v12_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 2581 CP_PIPE_ID pipe) 2582 { 2583 u32 tmp; 2584 2585 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 2586 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 2587 2588 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 2589 } 2590 2591 static void gfx_v12_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 2592 struct amdgpu_ring *ring) 2593 { 2594 u32 tmp; 2595 2596 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 2597 if (ring->use_doorbell) { 2598 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2599 DOORBELL_OFFSET, ring->doorbell_index); 2600 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2601 DOORBELL_EN, 1); 2602 } else { 2603 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2604 DOORBELL_EN, 0); 2605 } 2606 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 2607 2608 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 2609 DOORBELL_RANGE_LOWER, ring->doorbell_index); 2610 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 2611 2612 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 2613 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 2614 } 2615 2616 static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev) 2617 { 2618 struct amdgpu_ring *ring; 2619 u32 tmp; 2620 u32 rb_bufsz; 2621 u64 rb_addr, rptr_addr, wptr_gpu_addr; 2622 u32 i; 2623 2624 /* Set the write pointer delay */ 2625 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 2626 2627 /* set the RB to use vmid 0 */ 2628 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 2629 2630 /* Init gfx ring 0 for pipe 0 */ 2631 mutex_lock(&adev->srbm_mutex); 2632 gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 2633 2634 /* Set ring buffer size */ 2635 ring = &adev->gfx.gfx_ring[0]; 2636 rb_bufsz = order_base_2(ring->ring_size / 8); 2637 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 2638 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 2639 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 2640 2641 /* Initialize the ring buffer's write pointers */ 2642 ring->wptr = 0; 2643 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2644 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 2645 2646 /* set the wb address whether it's enabled or not */ 2647 rptr_addr = ring->rptr_gpu_addr; 2648 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 2649 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 2650 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 2651 2652 wptr_gpu_addr = ring->wptr_gpu_addr; 2653 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 2654 lower_32_bits(wptr_gpu_addr)); 2655 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 2656 upper_32_bits(wptr_gpu_addr)); 2657 2658 mdelay(1); 2659 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 2660 2661 rb_addr = ring->gpu_addr >> 8; 2662 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 2663 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 2664 2665 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 2666 2667 gfx_v12_0_cp_gfx_set_doorbell(adev, ring); 2668 mutex_unlock(&adev->srbm_mutex); 2669 2670 /* Switch to pipe 0 */ 2671 mutex_lock(&adev->srbm_mutex); 2672 gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 2673 mutex_unlock(&adev->srbm_mutex); 2674 2675 /* start the ring */ 2676 gfx_v12_0_cp_gfx_start(adev); 2677 2678 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 2679 ring = &adev->gfx.gfx_ring[i]; 2680 ring->sched.ready = true; 2681 } 2682 2683 return 0; 2684 } 2685 2686 static void gfx_v12_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 2687 { 2688 u32 data; 2689 2690 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2691 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 2692 enable ? 0 : 1); 2693 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 2694 enable ? 0 : 1); 2695 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 2696 enable ? 0 : 1); 2697 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 2698 enable ? 0 : 1); 2699 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 2700 enable ? 0 : 1); 2701 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 2702 enable ? 1 : 0); 2703 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 2704 enable ? 1 : 0); 2705 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 2706 enable ? 1 : 0); 2707 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 2708 enable ? 1 : 0); 2709 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 2710 enable ? 0 : 1); 2711 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 2712 2713 adev->gfx.kiq[0].ring.sched.ready = enable; 2714 2715 udelay(50); 2716 } 2717 2718 static int gfx_v12_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 2719 { 2720 const struct gfx_firmware_header_v2_0 *mec_hdr; 2721 const __le32 *fw_ucode, *fw_data; 2722 u32 tmp, fw_ucode_size, fw_data_size; 2723 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 2724 u32 *fw_ucode_ptr, *fw_data_ptr; 2725 int r; 2726 2727 if (!adev->gfx.mec_fw) 2728 return -EINVAL; 2729 2730 gfx_v12_0_cp_compute_enable(adev, false); 2731 2732 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 2733 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2734 2735 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 2736 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 2737 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 2738 2739 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 2740 le32_to_cpu(mec_hdr->data_offset_bytes)); 2741 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 2742 2743 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2744 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2745 &adev->gfx.mec.mec_fw_obj, 2746 &adev->gfx.mec.mec_fw_gpu_addr, 2747 (void **)&fw_ucode_ptr); 2748 if (r) { 2749 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 2750 gfx_v12_0_mec_fini(adev); 2751 return r; 2752 } 2753 2754 r = amdgpu_bo_create_reserved(adev, 2755 ALIGN(fw_data_size, 64 * 1024) * 2756 adev->gfx.mec.num_pipe_per_mec, 2757 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2758 &adev->gfx.mec.mec_fw_data_obj, 2759 &adev->gfx.mec.mec_fw_data_gpu_addr, 2760 (void **)&fw_data_ptr); 2761 if (r) { 2762 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 2763 gfx_v12_0_mec_fini(adev); 2764 return r; 2765 } 2766 2767 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); 2768 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2769 memcpy(fw_data_ptr + i * ALIGN(fw_data_size, 64 * 1024) / 4, fw_data, fw_data_size); 2770 } 2771 2772 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 2773 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); 2774 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 2775 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); 2776 2777 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2778 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2779 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2780 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2781 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2782 2783 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2784 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2785 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2786 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2787 2788 mutex_lock(&adev->srbm_mutex); 2789 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2790 soc24_grbm_select(adev, 1, i, 0, 0); 2791 2792 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, 2793 lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + 2794 i * ALIGN(fw_data_size, 64 * 1024))); 2795 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2796 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + 2797 i * ALIGN(fw_data_size, 64 * 1024))); 2798 2799 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2800 lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2801 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2802 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2803 } 2804 mutex_unlock(&adev->srbm_mutex); 2805 soc24_grbm_select(adev, 0, 0, 0, 0); 2806 2807 /* Trigger an invalidation of the L1 instruction caches */ 2808 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2809 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2810 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2811 2812 /* Wait for invalidation complete */ 2813 for (i = 0; i < usec_timeout; i++) { 2814 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2815 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2816 INVALIDATE_DCACHE_COMPLETE)) 2817 break; 2818 udelay(1); 2819 } 2820 2821 if (i >= usec_timeout) { 2822 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2823 return -EINVAL; 2824 } 2825 2826 /* Trigger an invalidation of the L1 instruction caches */ 2827 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2828 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2829 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2830 2831 /* Wait for invalidation complete */ 2832 for (i = 0; i < usec_timeout; i++) { 2833 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2834 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2835 INVALIDATE_CACHE_COMPLETE)) 2836 break; 2837 udelay(1); 2838 } 2839 2840 if (i >= usec_timeout) { 2841 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2842 return -EINVAL; 2843 } 2844 2845 gfx_v12_0_set_mec_ucode_start_addr(adev); 2846 2847 return 0; 2848 } 2849 2850 static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring) 2851 { 2852 uint32_t tmp; 2853 struct amdgpu_device *adev = ring->adev; 2854 2855 /* tell RLC which is KIQ queue */ 2856 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 2857 tmp &= 0xffffff00; 2858 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 2859 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80); 2860 } 2861 2862 static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev) 2863 { 2864 /* set graphics engine doorbell range */ 2865 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 2866 (adev->doorbell_index.gfx_ring0 * 2) << 2); 2867 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 2868 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 2869 2870 /* set compute engine doorbell range */ 2871 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 2872 (adev->doorbell_index.kiq * 2) << 2); 2873 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 2874 (adev->doorbell_index.userqueue_end * 2) << 2); 2875 } 2876 2877 static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 2878 struct amdgpu_mqd_prop *prop) 2879 { 2880 struct v12_gfx_mqd *mqd = m; 2881 uint64_t hqd_gpu_addr, wb_gpu_addr; 2882 uint32_t tmp; 2883 uint32_t rb_bufsz; 2884 2885 /* set up gfx hqd wptr */ 2886 mqd->cp_gfx_hqd_wptr = 0; 2887 mqd->cp_gfx_hqd_wptr_hi = 0; 2888 2889 /* set the pointer to the MQD */ 2890 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 2891 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 2892 2893 /* set up mqd control */ 2894 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); 2895 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 2896 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 2897 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 2898 mqd->cp_gfx_mqd_control = tmp; 2899 2900 /* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ 2901 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); 2902 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 2903 mqd->cp_gfx_hqd_vmid = 0; 2904 2905 /* set up default queue priority level 2906 * 0x0 = low priority, 0x1 = high priority */ 2907 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); 2908 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); 2909 mqd->cp_gfx_hqd_queue_priority = tmp; 2910 2911 /* set up time quantum */ 2912 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); 2913 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 2914 mqd->cp_gfx_hqd_quantum = tmp; 2915 2916 /* set up gfx hqd base. this is similar as CP_RB_BASE */ 2917 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 2918 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 2919 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 2920 2921 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */ 2922 wb_gpu_addr = prop->rptr_gpu_addr; 2923 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 2924 mqd->cp_gfx_hqd_rptr_addr_hi = 2925 upper_32_bits(wb_gpu_addr) & 0xffff; 2926 2927 /* set up rb_wptr_poll addr */ 2928 wb_gpu_addr = prop->wptr_gpu_addr; 2929 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2930 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2931 2932 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ 2933 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 2934 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); 2935 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 2936 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 2937 #ifdef __BIG_ENDIAN 2938 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 2939 #endif 2940 mqd->cp_gfx_hqd_cntl = tmp; 2941 2942 /* set up cp_doorbell_control */ 2943 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 2944 if (prop->use_doorbell) { 2945 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2946 DOORBELL_OFFSET, prop->doorbell_index); 2947 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2948 DOORBELL_EN, 1); 2949 } else 2950 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2951 DOORBELL_EN, 0); 2952 mqd->cp_rb_doorbell_control = tmp; 2953 2954 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2955 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); 2956 2957 /* active the queue */ 2958 mqd->cp_gfx_hqd_active = 1; 2959 2960 return 0; 2961 } 2962 2963 static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) 2964 { 2965 struct amdgpu_device *adev = ring->adev; 2966 struct v12_gfx_mqd *mqd = ring->mqd_ptr; 2967 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 2968 2969 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 2970 memset((void *)mqd, 0, sizeof(*mqd)); 2971 mutex_lock(&adev->srbm_mutex); 2972 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2973 amdgpu_ring_init_mqd(ring); 2974 soc24_grbm_select(adev, 0, 0, 0, 0); 2975 mutex_unlock(&adev->srbm_mutex); 2976 if (adev->gfx.me.mqd_backup[mqd_idx]) 2977 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 2978 } else { 2979 /* restore mqd with the backup copy */ 2980 if (adev->gfx.me.mqd_backup[mqd_idx]) 2981 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 2982 /* reset the ring */ 2983 ring->wptr = 0; 2984 *ring->wptr_cpu_addr = 0; 2985 amdgpu_ring_clear_ring(ring); 2986 } 2987 2988 return 0; 2989 } 2990 2991 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 2992 { 2993 int r, i; 2994 struct amdgpu_ring *ring; 2995 2996 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 2997 ring = &adev->gfx.gfx_ring[i]; 2998 2999 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3000 if (unlikely(r != 0)) 3001 goto done; 3002 3003 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3004 if (!r) { 3005 r = gfx_v12_0_kgq_init_queue(ring, false); 3006 amdgpu_bo_kunmap(ring->mqd_obj); 3007 ring->mqd_ptr = NULL; 3008 } 3009 amdgpu_bo_unreserve(ring->mqd_obj); 3010 if (r) 3011 goto done; 3012 } 3013 3014 r = amdgpu_gfx_enable_kgq(adev, 0); 3015 if (r) 3016 goto done; 3017 3018 r = gfx_v12_0_cp_gfx_start(adev); 3019 if (r) 3020 goto done; 3021 3022 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3023 ring = &adev->gfx.gfx_ring[i]; 3024 ring->sched.ready = true; 3025 } 3026 done: 3027 return r; 3028 } 3029 3030 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 3031 struct amdgpu_mqd_prop *prop) 3032 { 3033 struct v12_compute_mqd *mqd = m; 3034 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 3035 uint32_t tmp; 3036 3037 mqd->header = 0xC0310800; 3038 mqd->compute_pipelinestat_enable = 0x00000001; 3039 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 3040 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 3041 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 3042 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 3043 mqd->compute_misc_reserved = 0x00000007; 3044 3045 eop_base_addr = prop->eop_gpu_addr >> 8; 3046 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 3047 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 3048 3049 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3050 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); 3051 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 3052 (order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1)); 3053 3054 mqd->cp_hqd_eop_control = tmp; 3055 3056 /* enable doorbell? */ 3057 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3058 3059 if (prop->use_doorbell) { 3060 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3061 DOORBELL_OFFSET, prop->doorbell_index); 3062 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3063 DOORBELL_EN, 1); 3064 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3065 DOORBELL_SOURCE, 0); 3066 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3067 DOORBELL_HIT, 0); 3068 } else { 3069 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3070 DOORBELL_EN, 0); 3071 } 3072 3073 mqd->cp_hqd_pq_doorbell_control = tmp; 3074 3075 /* disable the queue if it's active */ 3076 mqd->cp_hqd_dequeue_request = 0; 3077 mqd->cp_hqd_pq_rptr = 0; 3078 mqd->cp_hqd_pq_wptr_lo = 0; 3079 mqd->cp_hqd_pq_wptr_hi = 0; 3080 3081 /* set the pointer to the MQD */ 3082 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 3083 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 3084 3085 /* set MQD vmid to 0 */ 3086 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); 3087 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 3088 mqd->cp_mqd_control = tmp; 3089 3090 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3091 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 3092 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 3093 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 3094 3095 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3096 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); 3097 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 3098 (order_base_2(prop->queue_size / 4) - 1)); 3099 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 3100 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 3101 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); 3102 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 3103 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 3104 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 3105 mqd->cp_hqd_pq_control = tmp; 3106 3107 /* set the wb address whether it's enabled or not */ 3108 wb_gpu_addr = prop->rptr_gpu_addr; 3109 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 3110 mqd->cp_hqd_pq_rptr_report_addr_hi = 3111 upper_32_bits(wb_gpu_addr) & 0xffff; 3112 3113 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3114 wb_gpu_addr = prop->wptr_gpu_addr; 3115 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3116 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3117 3118 tmp = 0; 3119 /* enable the doorbell if requested */ 3120 if (prop->use_doorbell) { 3121 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3122 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3123 DOORBELL_OFFSET, prop->doorbell_index); 3124 3125 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3126 DOORBELL_EN, 1); 3127 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3128 DOORBELL_SOURCE, 0); 3129 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3130 DOORBELL_HIT, 0); 3131 } 3132 3133 mqd->cp_hqd_pq_doorbell_control = tmp; 3134 3135 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3136 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); 3137 3138 /* set the vmid for the queue */ 3139 mqd->cp_hqd_vmid = 0; 3140 3141 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); 3142 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 3143 mqd->cp_hqd_persistent_state = tmp; 3144 3145 /* set MIN_IB_AVAIL_SIZE */ 3146 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); 3147 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 3148 mqd->cp_hqd_ib_control = tmp; 3149 3150 /* set static priority for a compute queue/ring */ 3151 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 3152 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 3153 3154 mqd->cp_hqd_active = prop->hqd_active; 3155 3156 return 0; 3157 } 3158 3159 static int gfx_v12_0_kiq_init_register(struct amdgpu_ring *ring) 3160 { 3161 struct amdgpu_device *adev = ring->adev; 3162 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3163 int j; 3164 3165 /* inactivate the queue */ 3166 if (amdgpu_sriov_vf(adev)) 3167 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 3168 3169 /* disable wptr polling */ 3170 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3171 3172 /* write the EOP addr */ 3173 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 3174 mqd->cp_hqd_eop_base_addr_lo); 3175 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 3176 mqd->cp_hqd_eop_base_addr_hi); 3177 3178 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3179 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 3180 mqd->cp_hqd_eop_control); 3181 3182 /* enable doorbell? */ 3183 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3184 mqd->cp_hqd_pq_doorbell_control); 3185 3186 /* disable the queue if it's active */ 3187 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 3188 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 3189 for (j = 0; j < adev->usec_timeout; j++) { 3190 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 3191 break; 3192 udelay(1); 3193 } 3194 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 3195 mqd->cp_hqd_dequeue_request); 3196 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 3197 mqd->cp_hqd_pq_rptr); 3198 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3199 mqd->cp_hqd_pq_wptr_lo); 3200 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3201 mqd->cp_hqd_pq_wptr_hi); 3202 } 3203 3204 /* set the pointer to the MQD */ 3205 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 3206 mqd->cp_mqd_base_addr_lo); 3207 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 3208 mqd->cp_mqd_base_addr_hi); 3209 3210 /* set MQD vmid to 0 */ 3211 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 3212 mqd->cp_mqd_control); 3213 3214 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3215 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 3216 mqd->cp_hqd_pq_base_lo); 3217 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 3218 mqd->cp_hqd_pq_base_hi); 3219 3220 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3221 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 3222 mqd->cp_hqd_pq_control); 3223 3224 /* set the wb address whether it's enabled or not */ 3225 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 3226 mqd->cp_hqd_pq_rptr_report_addr_lo); 3227 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 3228 mqd->cp_hqd_pq_rptr_report_addr_hi); 3229 3230 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3231 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 3232 mqd->cp_hqd_pq_wptr_poll_addr_lo); 3233 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 3234 mqd->cp_hqd_pq_wptr_poll_addr_hi); 3235 3236 /* enable the doorbell if requested */ 3237 if (ring->use_doorbell) { 3238 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3239 (adev->doorbell_index.kiq * 2) << 2); 3240 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3241 (adev->doorbell_index.userqueue_end * 2) << 2); 3242 } 3243 3244 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3245 mqd->cp_hqd_pq_doorbell_control); 3246 3247 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3248 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3249 mqd->cp_hqd_pq_wptr_lo); 3250 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3251 mqd->cp_hqd_pq_wptr_hi); 3252 3253 /* set the vmid for the queue */ 3254 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 3255 3256 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 3257 mqd->cp_hqd_persistent_state); 3258 3259 /* activate the queue */ 3260 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 3261 mqd->cp_hqd_active); 3262 3263 if (ring->use_doorbell) 3264 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 3265 3266 return 0; 3267 } 3268 3269 static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring) 3270 { 3271 struct amdgpu_device *adev = ring->adev; 3272 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3273 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 3274 3275 gfx_v12_0_kiq_setting(ring); 3276 3277 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 3278 /* reset MQD to a clean status */ 3279 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3280 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3281 3282 /* reset ring buffer */ 3283 ring->wptr = 0; 3284 amdgpu_ring_clear_ring(ring); 3285 3286 mutex_lock(&adev->srbm_mutex); 3287 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3288 gfx_v12_0_kiq_init_register(ring); 3289 soc24_grbm_select(adev, 0, 0, 0, 0); 3290 mutex_unlock(&adev->srbm_mutex); 3291 } else { 3292 memset((void *)mqd, 0, sizeof(*mqd)); 3293 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 3294 amdgpu_ring_clear_ring(ring); 3295 mutex_lock(&adev->srbm_mutex); 3296 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3297 amdgpu_ring_init_mqd(ring); 3298 gfx_v12_0_kiq_init_register(ring); 3299 soc24_grbm_select(adev, 0, 0, 0, 0); 3300 mutex_unlock(&adev->srbm_mutex); 3301 3302 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3303 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3304 } 3305 3306 return 0; 3307 } 3308 3309 static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) 3310 { 3311 struct amdgpu_device *adev = ring->adev; 3312 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3313 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 3314 3315 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 3316 memset((void *)mqd, 0, sizeof(*mqd)); 3317 mutex_lock(&adev->srbm_mutex); 3318 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3319 amdgpu_ring_init_mqd(ring); 3320 soc24_grbm_select(adev, 0, 0, 0, 0); 3321 mutex_unlock(&adev->srbm_mutex); 3322 3323 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3324 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3325 } else { 3326 /* restore MQD to a clean status */ 3327 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3328 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3329 /* reset ring buffer */ 3330 ring->wptr = 0; 3331 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 3332 amdgpu_ring_clear_ring(ring); 3333 } 3334 3335 return 0; 3336 } 3337 3338 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev) 3339 { 3340 struct amdgpu_ring *ring; 3341 int r; 3342 3343 ring = &adev->gfx.kiq[0].ring; 3344 3345 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3346 if (unlikely(r != 0)) 3347 return r; 3348 3349 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3350 if (unlikely(r != 0)) { 3351 amdgpu_bo_unreserve(ring->mqd_obj); 3352 return r; 3353 } 3354 3355 gfx_v12_0_kiq_init_queue(ring); 3356 amdgpu_bo_kunmap(ring->mqd_obj); 3357 ring->mqd_ptr = NULL; 3358 amdgpu_bo_unreserve(ring->mqd_obj); 3359 ring->sched.ready = true; 3360 return 0; 3361 } 3362 3363 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) 3364 { 3365 struct amdgpu_ring *ring = NULL; 3366 int r = 0, i; 3367 3368 if (!amdgpu_async_gfx_ring) 3369 gfx_v12_0_cp_compute_enable(adev, true); 3370 3371 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3372 ring = &adev->gfx.compute_ring[i]; 3373 3374 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3375 if (unlikely(r != 0)) 3376 goto done; 3377 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3378 if (!r) { 3379 r = gfx_v12_0_kcq_init_queue(ring, false); 3380 amdgpu_bo_kunmap(ring->mqd_obj); 3381 ring->mqd_ptr = NULL; 3382 } 3383 amdgpu_bo_unreserve(ring->mqd_obj); 3384 if (r) 3385 goto done; 3386 } 3387 3388 r = amdgpu_gfx_enable_kcq(adev, 0); 3389 done: 3390 return r; 3391 } 3392 3393 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev) 3394 { 3395 int r, i; 3396 struct amdgpu_ring *ring; 3397 3398 if (!(adev->flags & AMD_IS_APU)) 3399 gfx_v12_0_enable_gui_idle_interrupt(adev, false); 3400 3401 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 3402 /* legacy firmware loading */ 3403 r = gfx_v12_0_cp_gfx_load_microcode(adev); 3404 if (r) 3405 return r; 3406 3407 r = gfx_v12_0_cp_compute_load_microcode_rs64(adev); 3408 if (r) 3409 return r; 3410 } 3411 3412 gfx_v12_0_cp_set_doorbell_range(adev); 3413 3414 if (amdgpu_async_gfx_ring) { 3415 gfx_v12_0_cp_compute_enable(adev, true); 3416 gfx_v12_0_cp_gfx_enable(adev, true); 3417 } 3418 3419 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 3420 r = amdgpu_mes_kiq_hw_init(adev); 3421 else 3422 r = gfx_v12_0_kiq_resume(adev); 3423 if (r) 3424 return r; 3425 3426 r = gfx_v12_0_kcq_resume(adev); 3427 if (r) 3428 return r; 3429 3430 if (!amdgpu_async_gfx_ring) { 3431 r = gfx_v12_0_cp_gfx_resume(adev); 3432 if (r) 3433 return r; 3434 } else { 3435 r = gfx_v12_0_cp_async_gfx_ring_resume(adev); 3436 if (r) 3437 return r; 3438 } 3439 3440 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3441 ring = &adev->gfx.gfx_ring[i]; 3442 r = amdgpu_ring_test_helper(ring); 3443 if (r) 3444 return r; 3445 } 3446 3447 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3448 ring = &adev->gfx.compute_ring[i]; 3449 r = amdgpu_ring_test_helper(ring); 3450 if (r) 3451 return r; 3452 } 3453 3454 return 0; 3455 } 3456 3457 static void gfx_v12_0_cp_enable(struct amdgpu_device *adev, bool enable) 3458 { 3459 gfx_v12_0_cp_gfx_enable(adev, enable); 3460 gfx_v12_0_cp_compute_enable(adev, enable); 3461 } 3462 3463 static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev) 3464 { 3465 int r; 3466 bool value; 3467 3468 r = adev->gfxhub.funcs->gart_enable(adev); 3469 if (r) 3470 return r; 3471 3472 adev->hdp.funcs->flush_hdp(adev, NULL); 3473 3474 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 3475 false : true; 3476 3477 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 3478 /* TODO investigate why this and the hdp flush above is needed, 3479 * are we missing a flush somewhere else? */ 3480 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); 3481 3482 return 0; 3483 } 3484 3485 static int get_gb_addr_config(struct amdgpu_device *adev) 3486 { 3487 u32 gb_addr_config; 3488 3489 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 3490 if (gb_addr_config == 0) 3491 return -EINVAL; 3492 3493 adev->gfx.config.gb_addr_config_fields.num_pkrs = 3494 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 3495 3496 adev->gfx.config.gb_addr_config = gb_addr_config; 3497 3498 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 3499 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3500 GB_ADDR_CONFIG, NUM_PIPES); 3501 3502 adev->gfx.config.max_tile_pipes = 3503 adev->gfx.config.gb_addr_config_fields.num_pipes; 3504 3505 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 3506 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3507 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 3508 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 3509 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3510 GB_ADDR_CONFIG, NUM_RB_PER_SE); 3511 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 3512 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3513 GB_ADDR_CONFIG, NUM_SHADER_ENGINES); 3514 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 3515 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3516 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 3517 3518 return 0; 3519 } 3520 3521 static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev) 3522 { 3523 uint32_t data; 3524 3525 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 3526 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 3527 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 3528 3529 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 3530 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 3531 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 3532 } 3533 3534 static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) 3535 { 3536 if (amdgpu_sriov_vf(adev)) 3537 return; 3538 3539 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3540 case IP_VERSION(12, 0, 0): 3541 case IP_VERSION(12, 0, 1): 3542 soc15_program_register_sequence(adev, 3543 golden_settings_gc_12_0, 3544 (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); 3545 3546 if (adev->rev_id == 0) 3547 soc15_program_register_sequence(adev, 3548 golden_settings_gc_12_0_rev0, 3549 (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0)); 3550 break; 3551 default: 3552 break; 3553 } 3554 } 3555 3556 static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block) 3557 { 3558 int r; 3559 struct amdgpu_device *adev = ip_block->adev; 3560 3561 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 3562 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 3563 /* RLC autoload sequence 1: Program rlc ram */ 3564 if (adev->gfx.imu.funcs->program_rlc_ram) 3565 adev->gfx.imu.funcs->program_rlc_ram(adev); 3566 } 3567 /* rlc autoload firmware */ 3568 r = gfx_v12_0_rlc_backdoor_autoload_enable(adev); 3569 if (r) 3570 return r; 3571 } else { 3572 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 3573 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 3574 if (adev->gfx.imu.funcs->load_microcode) 3575 adev->gfx.imu.funcs->load_microcode(adev); 3576 if (adev->gfx.imu.funcs->setup_imu) 3577 adev->gfx.imu.funcs->setup_imu(adev); 3578 if (adev->gfx.imu.funcs->start_imu) 3579 adev->gfx.imu.funcs->start_imu(adev); 3580 } 3581 3582 /* disable gpa mode in backdoor loading */ 3583 gfx_v12_0_disable_gpa_mode(adev); 3584 } 3585 } 3586 3587 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 3588 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 3589 r = gfx_v12_0_wait_for_rlc_autoload_complete(adev); 3590 if (r) { 3591 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r); 3592 return r; 3593 } 3594 } 3595 3596 if (!amdgpu_emu_mode) 3597 gfx_v12_0_init_golden_registers(adev); 3598 3599 adev->gfx.is_poweron = true; 3600 3601 if (get_gb_addr_config(adev)) 3602 DRM_WARN("Invalid gb_addr_config !\n"); 3603 3604 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 3605 gfx_v12_0_config_gfx_rs64(adev); 3606 3607 r = gfx_v12_0_gfxhub_enable(adev); 3608 if (r) 3609 return r; 3610 3611 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT || 3612 adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) && 3613 (amdgpu_dpm == 1)) { 3614 /** 3615 * For gfx 12, rlc firmware loading relies on smu firmware is 3616 * loaded firstly, so in direct type, it has to load smc ucode 3617 * here before rlc. 3618 */ 3619 r = amdgpu_pm_load_smu_firmware(adev, NULL); 3620 if (r) 3621 return r; 3622 } 3623 3624 gfx_v12_0_constants_init(adev); 3625 3626 if (adev->nbio.funcs->gc_doorbell_init) 3627 adev->nbio.funcs->gc_doorbell_init(adev); 3628 3629 r = gfx_v12_0_rlc_resume(adev); 3630 if (r) 3631 return r; 3632 3633 /* 3634 * init golden registers and rlc resume may override some registers, 3635 * reconfig them here 3636 */ 3637 gfx_v12_0_tcp_harvest(adev); 3638 3639 r = gfx_v12_0_cp_resume(adev); 3640 if (r) 3641 return r; 3642 3643 return r; 3644 } 3645 3646 static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) 3647 { 3648 struct amdgpu_device *adev = ip_block->adev; 3649 uint32_t tmp; 3650 3651 cancel_delayed_work_sync(&adev->gfx.idle_work); 3652 3653 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 3654 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 3655 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); 3656 3657 if (!adev->no_hw_access) { 3658 if (amdgpu_async_gfx_ring) { 3659 if (amdgpu_gfx_disable_kgq(adev, 0)) 3660 DRM_ERROR("KGQ disable failed\n"); 3661 } 3662 3663 if (amdgpu_gfx_disable_kcq(adev, 0)) 3664 DRM_ERROR("KCQ disable failed\n"); 3665 3666 amdgpu_mes_kiq_hw_fini(adev); 3667 } 3668 3669 if (amdgpu_sriov_vf(adev)) { 3670 gfx_v12_0_cp_gfx_enable(adev, false); 3671 /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ 3672 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 3673 tmp &= 0xffffff00; 3674 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3675 3676 return 0; 3677 } 3678 gfx_v12_0_cp_enable(adev, false); 3679 gfx_v12_0_enable_gui_idle_interrupt(adev, false); 3680 3681 adev->gfxhub.funcs->gart_disable(adev); 3682 3683 adev->gfx.is_poweron = false; 3684 3685 return 0; 3686 } 3687 3688 static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block) 3689 { 3690 return gfx_v12_0_hw_fini(ip_block); 3691 } 3692 3693 static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block) 3694 { 3695 return gfx_v12_0_hw_init(ip_block); 3696 } 3697 3698 static bool gfx_v12_0_is_idle(void *handle) 3699 { 3700 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3701 3702 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 3703 GRBM_STATUS, GUI_ACTIVE)) 3704 return false; 3705 else 3706 return true; 3707 } 3708 3709 static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) 3710 { 3711 unsigned i; 3712 u32 tmp; 3713 struct amdgpu_device *adev = ip_block->adev; 3714 3715 for (i = 0; i < adev->usec_timeout; i++) { 3716 /* read MC_STATUS */ 3717 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 3718 GRBM_STATUS__GUI_ACTIVE_MASK; 3719 3720 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 3721 return 0; 3722 udelay(1); 3723 } 3724 return -ETIMEDOUT; 3725 } 3726 3727 static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3728 { 3729 uint64_t clock = 0; 3730 3731 if (adev->smuio.funcs && 3732 adev->smuio.funcs->get_gpu_clock_counter) 3733 clock = adev->smuio.funcs->get_gpu_clock_counter(adev); 3734 else 3735 dev_warn(adev->dev, "query gpu clock counter is not supported\n"); 3736 3737 return clock; 3738 } 3739 3740 static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block) 3741 { 3742 struct amdgpu_device *adev = ip_block->adev; 3743 3744 adev->gfx.funcs = &gfx_v12_0_gfx_funcs; 3745 3746 adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS; 3747 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 3748 AMDGPU_MAX_COMPUTE_RINGS); 3749 3750 gfx_v12_0_set_kiq_pm4_funcs(adev); 3751 gfx_v12_0_set_ring_funcs(adev); 3752 gfx_v12_0_set_irq_funcs(adev); 3753 gfx_v12_0_set_rlc_funcs(adev); 3754 gfx_v12_0_set_mqd_funcs(adev); 3755 gfx_v12_0_set_imu_funcs(adev); 3756 3757 gfx_v12_0_init_rlcg_reg_access_ctrl(adev); 3758 3759 return gfx_v12_0_init_microcode(adev); 3760 } 3761 3762 static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block) 3763 { 3764 struct amdgpu_device *adev = ip_block->adev; 3765 int r; 3766 3767 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 3768 if (r) 3769 return r; 3770 3771 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 3772 if (r) 3773 return r; 3774 3775 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 3776 if (r) 3777 return r; 3778 3779 return 0; 3780 } 3781 3782 static bool gfx_v12_0_is_rlc_enabled(struct amdgpu_device *adev) 3783 { 3784 uint32_t rlc_cntl; 3785 3786 /* if RLC is not enabled, do nothing */ 3787 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 3788 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 3789 } 3790 3791 static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, 3792 int xcc_id) 3793 { 3794 uint32_t data; 3795 unsigned i; 3796 3797 data = RLC_SAFE_MODE__CMD_MASK; 3798 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 3799 3800 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 3801 3802 /* wait for RLC_SAFE_MODE */ 3803 for (i = 0; i < adev->usec_timeout; i++) { 3804 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 3805 RLC_SAFE_MODE, CMD)) 3806 break; 3807 udelay(1); 3808 } 3809 } 3810 3811 static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, 3812 int xcc_id) 3813 { 3814 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 3815 } 3816 3817 static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev, 3818 bool enable) 3819 { 3820 uint32_t def, data; 3821 3822 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 3823 return; 3824 3825 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 3826 3827 if (enable) 3828 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 3829 else 3830 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 3831 3832 if (def != data) 3833 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 3834 } 3835 3836 static void gfx_v12_0_update_spm_vmid(struct amdgpu_device *adev, 3837 struct amdgpu_ring *ring, 3838 unsigned vmid) 3839 { 3840 u32 reg, data; 3841 3842 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 3843 if (amdgpu_sriov_is_pp_one_vf(adev)) 3844 data = RREG32_NO_KIQ(reg); 3845 else 3846 data = RREG32(reg); 3847 3848 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 3849 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 3850 3851 if (amdgpu_sriov_is_pp_one_vf(adev)) 3852 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 3853 else 3854 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 3855 3856 if (ring 3857 && amdgpu_sriov_is_pp_one_vf(adev) 3858 && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) 3859 || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { 3860 uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 3861 amdgpu_ring_emit_wreg(ring, reg, data); 3862 } 3863 } 3864 3865 static const struct amdgpu_rlc_funcs gfx_v12_0_rlc_funcs = { 3866 .is_rlc_enabled = gfx_v12_0_is_rlc_enabled, 3867 .set_safe_mode = gfx_v12_0_set_safe_mode, 3868 .unset_safe_mode = gfx_v12_0_unset_safe_mode, 3869 .init = gfx_v12_0_rlc_init, 3870 .get_csb_size = gfx_v12_0_get_csb_size, 3871 .get_csb_buffer = gfx_v12_0_get_csb_buffer, 3872 .resume = gfx_v12_0_rlc_resume, 3873 .stop = gfx_v12_0_rlc_stop, 3874 .reset = gfx_v12_0_rlc_reset, 3875 .start = gfx_v12_0_rlc_start, 3876 .update_spm_vmid = gfx_v12_0_update_spm_vmid, 3877 }; 3878 3879 #if 0 3880 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable) 3881 { 3882 /* TODO */ 3883 } 3884 3885 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable) 3886 { 3887 /* TODO */ 3888 } 3889 #endif 3890 3891 static int gfx_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block, 3892 enum amd_powergating_state state) 3893 { 3894 struct amdgpu_device *adev = ip_block->adev; 3895 bool enable = (state == AMD_PG_STATE_GATE); 3896 3897 if (amdgpu_sriov_vf(adev)) 3898 return 0; 3899 3900 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3901 case IP_VERSION(12, 0, 0): 3902 case IP_VERSION(12, 0, 1): 3903 amdgpu_gfx_off_ctrl(adev, enable); 3904 break; 3905 default: 3906 break; 3907 } 3908 3909 return 0; 3910 } 3911 3912 static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 3913 bool enable) 3914 { 3915 uint32_t def, data; 3916 3917 if (!(adev->cg_flags & 3918 (AMD_CG_SUPPORT_GFX_CGCG | 3919 AMD_CG_SUPPORT_GFX_CGLS | 3920 AMD_CG_SUPPORT_GFX_3D_CGCG | 3921 AMD_CG_SUPPORT_GFX_3D_CGLS))) 3922 return; 3923 3924 if (enable) { 3925 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 3926 3927 /* unset CGCG override */ 3928 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 3929 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 3930 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3931 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3932 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 3933 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3934 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 3935 3936 /* update CGCG override bits */ 3937 if (def != data) 3938 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 3939 3940 /* enable cgcg FSM(0x0000363F) */ 3941 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 3942 3943 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 3944 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 3945 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3946 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3947 } 3948 3949 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 3950 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 3951 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3952 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3953 } 3954 3955 if (def != data) 3956 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 3957 3958 /* Program RLC_CGCG_CGLS_CTRL_3D */ 3959 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 3960 3961 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 3962 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 3963 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3964 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3965 } 3966 3967 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 3968 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 3969 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3970 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3971 } 3972 3973 if (def != data) 3974 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 3975 3976 /* set IDLE_POLL_COUNT(0x00900100) */ 3977 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 3978 3979 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 3980 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3981 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3982 3983 if (def != data) 3984 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 3985 3986 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 3987 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 3988 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 3989 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 3990 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 3991 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 3992 3993 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 3994 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 3995 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 3996 3997 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 3998 if (adev->sdma.num_instances > 1) { 3999 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 4000 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 4001 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 4002 } 4003 } else { 4004 /* Program RLC_CGCG_CGLS_CTRL */ 4005 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 4006 4007 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 4008 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 4009 4010 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 4011 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 4012 4013 if (def != data) 4014 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 4015 4016 /* Program RLC_CGCG_CGLS_CTRL_3D */ 4017 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 4018 4019 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 4020 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 4021 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 4022 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 4023 4024 if (def != data) 4025 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 4026 } 4027 } 4028 4029 static void gfx_v12_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 4030 bool enable) 4031 { 4032 uint32_t data, def; 4033 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 4034 return; 4035 4036 /* It is disabled by HW by default */ 4037 if (enable) { 4038 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 4039 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 4040 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4041 4042 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 4043 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 4044 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 4045 4046 if (def != data) 4047 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4048 } 4049 } else { 4050 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 4051 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4052 4053 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 4054 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 4055 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 4056 4057 if (def != data) 4058 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4059 } 4060 } 4061 } 4062 4063 static void gfx_v12_0_update_repeater_fgcg(struct amdgpu_device *adev, 4064 bool enable) 4065 { 4066 uint32_t def, data; 4067 4068 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 4069 return; 4070 4071 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4072 4073 if (enable) 4074 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK | 4075 RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK); 4076 else 4077 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK | 4078 RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK; 4079 4080 if (def != data) 4081 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4082 } 4083 4084 static void gfx_v12_0_update_sram_fgcg(struct amdgpu_device *adev, 4085 bool enable) 4086 { 4087 uint32_t def, data; 4088 4089 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 4090 return; 4091 4092 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4093 4094 if (enable) 4095 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 4096 else 4097 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 4098 4099 if (def != data) 4100 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4101 } 4102 4103 static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev, 4104 bool enable) 4105 { 4106 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 4107 4108 gfx_v12_0_update_coarse_grain_clock_gating(adev, enable); 4109 4110 gfx_v12_0_update_medium_grain_clock_gating(adev, enable); 4111 4112 gfx_v12_0_update_repeater_fgcg(adev, enable); 4113 4114 gfx_v12_0_update_sram_fgcg(adev, enable); 4115 4116 gfx_v12_0_update_perf_clk(adev, enable); 4117 4118 if (adev->cg_flags & 4119 (AMD_CG_SUPPORT_GFX_MGCG | 4120 AMD_CG_SUPPORT_GFX_CGLS | 4121 AMD_CG_SUPPORT_GFX_CGCG | 4122 AMD_CG_SUPPORT_GFX_3D_CGCG | 4123 AMD_CG_SUPPORT_GFX_3D_CGLS)) 4124 gfx_v12_0_enable_gui_idle_interrupt(adev, enable); 4125 4126 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 4127 4128 return 0; 4129 } 4130 4131 static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, 4132 enum amd_clockgating_state state) 4133 { 4134 struct amdgpu_device *adev = ip_block->adev; 4135 4136 if (amdgpu_sriov_vf(adev)) 4137 return 0; 4138 4139 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4140 case IP_VERSION(12, 0, 0): 4141 case IP_VERSION(12, 0, 1): 4142 gfx_v12_0_update_gfx_clock_gating(adev, 4143 state == AMD_CG_STATE_GATE); 4144 break; 4145 default: 4146 break; 4147 } 4148 4149 return 0; 4150 } 4151 4152 static void gfx_v12_0_get_clockgating_state(void *handle, u64 *flags) 4153 { 4154 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4155 int data; 4156 4157 /* AMD_CG_SUPPORT_GFX_MGCG */ 4158 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4159 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 4160 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 4161 4162 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 4163 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 4164 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 4165 4166 /* AMD_CG_SUPPORT_GFX_FGCG */ 4167 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 4168 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 4169 4170 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 4171 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 4172 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 4173 4174 /* AMD_CG_SUPPORT_GFX_CGCG */ 4175 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 4176 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 4177 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 4178 4179 /* AMD_CG_SUPPORT_GFX_CGLS */ 4180 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 4181 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 4182 4183 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 4184 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 4185 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 4186 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 4187 4188 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 4189 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 4190 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 4191 } 4192 4193 static u64 gfx_v12_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 4194 { 4195 /* gfx12 is 32bit rptr*/ 4196 return *(uint32_t *)ring->rptr_cpu_addr; 4197 } 4198 4199 static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 4200 { 4201 struct amdgpu_device *adev = ring->adev; 4202 u64 wptr; 4203 4204 /* XXX check if swapping is necessary on BE */ 4205 if (ring->use_doorbell) { 4206 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 4207 } else { 4208 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 4209 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 4210 } 4211 4212 return wptr; 4213 } 4214 4215 static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 4216 { 4217 struct amdgpu_device *adev = ring->adev; 4218 uint32_t *wptr_saved; 4219 uint32_t *is_queue_unmap; 4220 uint64_t aggregated_db_index; 4221 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; 4222 uint64_t wptr_tmp; 4223 4224 if (ring->is_mes_queue) { 4225 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 4226 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 4227 sizeof(uint32_t)); 4228 aggregated_db_index = 4229 amdgpu_mes_get_aggregated_doorbell_index(adev, 4230 ring->hw_prio); 4231 4232 wptr_tmp = ring->wptr & ring->buf_mask; 4233 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 4234 *wptr_saved = wptr_tmp; 4235 /* assume doorbell always being used by mes mapped queue */ 4236 if (*is_queue_unmap) { 4237 WDOORBELL64(aggregated_db_index, wptr_tmp); 4238 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4239 } else { 4240 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4241 4242 if (*is_queue_unmap) 4243 WDOORBELL64(aggregated_db_index, wptr_tmp); 4244 } 4245 } else { 4246 if (ring->use_doorbell) { 4247 /* XXX check if swapping is necessary on BE */ 4248 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 4249 ring->wptr); 4250 WDOORBELL64(ring->doorbell_index, ring->wptr); 4251 } else { 4252 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 4253 lower_32_bits(ring->wptr)); 4254 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 4255 upper_32_bits(ring->wptr)); 4256 } 4257 } 4258 } 4259 4260 static u64 gfx_v12_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 4261 { 4262 /* gfx12 hardware is 32bit rptr */ 4263 return *(uint32_t *)ring->rptr_cpu_addr; 4264 } 4265 4266 static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 4267 { 4268 u64 wptr; 4269 4270 /* XXX check if swapping is necessary on BE */ 4271 if (ring->use_doorbell) 4272 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 4273 else 4274 BUG(); 4275 return wptr; 4276 } 4277 4278 static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 4279 { 4280 struct amdgpu_device *adev = ring->adev; 4281 uint32_t *wptr_saved; 4282 uint32_t *is_queue_unmap; 4283 uint64_t aggregated_db_index; 4284 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; 4285 uint64_t wptr_tmp; 4286 4287 if (ring->is_mes_queue) { 4288 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 4289 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 4290 sizeof(uint32_t)); 4291 aggregated_db_index = 4292 amdgpu_mes_get_aggregated_doorbell_index(adev, 4293 ring->hw_prio); 4294 4295 wptr_tmp = ring->wptr & ring->buf_mask; 4296 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 4297 *wptr_saved = wptr_tmp; 4298 /* assume doorbell always used by mes mapped queue */ 4299 if (*is_queue_unmap) { 4300 WDOORBELL64(aggregated_db_index, wptr_tmp); 4301 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4302 } else { 4303 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4304 4305 if (*is_queue_unmap) 4306 WDOORBELL64(aggregated_db_index, wptr_tmp); 4307 } 4308 } else { 4309 /* XXX check if swapping is necessary on BE */ 4310 if (ring->use_doorbell) { 4311 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 4312 ring->wptr); 4313 WDOORBELL64(ring->doorbell_index, ring->wptr); 4314 } else { 4315 BUG(); /* only DOORBELL method supported on gfx12 now */ 4316 } 4317 } 4318 } 4319 4320 static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 4321 { 4322 struct amdgpu_device *adev = ring->adev; 4323 u32 ref_and_mask, reg_mem_engine; 4324 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 4325 4326 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 4327 switch (ring->me) { 4328 case 1: 4329 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 4330 break; 4331 case 2: 4332 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 4333 break; 4334 default: 4335 return; 4336 } 4337 reg_mem_engine = 0; 4338 } else { 4339 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 4340 reg_mem_engine = 1; /* pfp */ 4341 } 4342 4343 gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 4344 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 4345 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 4346 ref_and_mask, ref_and_mask, 0x20); 4347 } 4348 4349 static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 4350 struct amdgpu_job *job, 4351 struct amdgpu_ib *ib, 4352 uint32_t flags) 4353 { 4354 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4355 u32 header, control = 0; 4356 4357 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 4358 4359 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 4360 4361 control |= ib->length_dw | (vmid << 24); 4362 4363 if (ring->is_mes_queue) 4364 /* inherit vmid from mqd */ 4365 control |= 0x400000; 4366 4367 amdgpu_ring_write(ring, header); 4368 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4369 amdgpu_ring_write(ring, 4370 #ifdef __BIG_ENDIAN 4371 (2 << 0) | 4372 #endif 4373 lower_32_bits(ib->gpu_addr)); 4374 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4375 amdgpu_ring_write(ring, control); 4376 } 4377 4378 static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 4379 struct amdgpu_job *job, 4380 struct amdgpu_ib *ib, 4381 uint32_t flags) 4382 { 4383 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4384 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 4385 4386 if (ring->is_mes_queue) 4387 /* inherit vmid from mqd */ 4388 control |= 0x40000000; 4389 4390 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 4391 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4392 amdgpu_ring_write(ring, 4393 #ifdef __BIG_ENDIAN 4394 (2 << 0) | 4395 #endif 4396 lower_32_bits(ib->gpu_addr)); 4397 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4398 amdgpu_ring_write(ring, control); 4399 } 4400 4401 static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 4402 u64 seq, unsigned flags) 4403 { 4404 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 4405 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 4406 4407 /* RELEASE_MEM - flush caches, send int */ 4408 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 4409 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 4410 PACKET3_RELEASE_MEM_GCR_GL2_WB | 4411 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 4412 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 4413 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 4414 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 4415 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 4416 4417 /* 4418 * the address should be Qword aligned if 64bit write, Dword 4419 * aligned if only send 32bit data low (discard data high) 4420 */ 4421 if (write64bit) 4422 BUG_ON(addr & 0x7); 4423 else 4424 BUG_ON(addr & 0x3); 4425 amdgpu_ring_write(ring, lower_32_bits(addr)); 4426 amdgpu_ring_write(ring, upper_32_bits(addr)); 4427 amdgpu_ring_write(ring, lower_32_bits(seq)); 4428 amdgpu_ring_write(ring, upper_32_bits(seq)); 4429 amdgpu_ring_write(ring, ring->is_mes_queue ? 4430 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); 4431 } 4432 4433 static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 4434 { 4435 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4436 uint32_t seq = ring->fence_drv.sync_seq; 4437 uint64_t addr = ring->fence_drv.gpu_addr; 4438 4439 gfx_v12_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 4440 upper_32_bits(addr), seq, 0xffffffff, 4); 4441 } 4442 4443 static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 4444 uint16_t pasid, uint32_t flush_type, 4445 bool all_hub, uint8_t dst_sel) 4446 { 4447 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 4448 amdgpu_ring_write(ring, 4449 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 4450 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 4451 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 4452 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 4453 } 4454 4455 static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 4456 unsigned vmid, uint64_t pd_addr) 4457 { 4458 if (ring->is_mes_queue) 4459 gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); 4460 else 4461 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 4462 4463 /* compute doesn't have PFP */ 4464 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 4465 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4466 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4467 amdgpu_ring_write(ring, 0x0); 4468 } 4469 } 4470 4471 static void gfx_v12_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 4472 u64 seq, unsigned int flags) 4473 { 4474 struct amdgpu_device *adev = ring->adev; 4475 4476 /* we only allocate 32bit for each seq wb address */ 4477 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 4478 4479 /* write fence seq to the "addr" */ 4480 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4481 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4482 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 4483 amdgpu_ring_write(ring, lower_32_bits(addr)); 4484 amdgpu_ring_write(ring, upper_32_bits(addr)); 4485 amdgpu_ring_write(ring, lower_32_bits(seq)); 4486 4487 if (flags & AMDGPU_FENCE_FLAG_INT) { 4488 /* set register to trigger INT */ 4489 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4490 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4491 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 4492 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 4493 amdgpu_ring_write(ring, 0); 4494 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 4495 } 4496 } 4497 4498 static void gfx_v12_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 4499 uint32_t flags) 4500 { 4501 uint32_t dw2 = 0; 4502 4503 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 4504 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 4505 /* set load_global_config & load_global_uconfig */ 4506 dw2 |= 0x8001; 4507 /* set load_cs_sh_regs */ 4508 dw2 |= 0x01000000; 4509 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 4510 dw2 |= 0x10002; 4511 } 4512 4513 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 4514 amdgpu_ring_write(ring, dw2); 4515 amdgpu_ring_write(ring, 0); 4516 } 4517 4518 static unsigned gfx_v12_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring, 4519 uint64_t addr) 4520 { 4521 unsigned ret; 4522 4523 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 4524 amdgpu_ring_write(ring, lower_32_bits(addr)); 4525 amdgpu_ring_write(ring, upper_32_bits(addr)); 4526 /* discard following DWs if *cond_exec_gpu_addr==0 */ 4527 amdgpu_ring_write(ring, 0); 4528 ret = ring->wptr & ring->buf_mask; 4529 /* patch dummy value later */ 4530 amdgpu_ring_write(ring, 0); 4531 4532 return ret; 4533 } 4534 4535 static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring) 4536 { 4537 int i, r = 0; 4538 struct amdgpu_device *adev = ring->adev; 4539 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 4540 struct amdgpu_ring *kiq_ring = &kiq->ring; 4541 unsigned long flags; 4542 4543 if (adev->enable_mes) 4544 return -EINVAL; 4545 4546 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 4547 return -EINVAL; 4548 4549 spin_lock_irqsave(&kiq->ring_lock, flags); 4550 4551 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 4552 spin_unlock_irqrestore(&kiq->ring_lock, flags); 4553 return -ENOMEM; 4554 } 4555 4556 /* assert preemption condition */ 4557 amdgpu_ring_set_preempt_cond_exec(ring, false); 4558 4559 /* assert IB preemption, emit the trailing fence */ 4560 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 4561 ring->trail_fence_gpu_addr, 4562 ++ring->trail_seq); 4563 amdgpu_ring_commit(kiq_ring); 4564 4565 spin_unlock_irqrestore(&kiq->ring_lock, flags); 4566 4567 /* poll the trailing fence */ 4568 for (i = 0; i < adev->usec_timeout; i++) { 4569 if (ring->trail_seq == 4570 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 4571 break; 4572 udelay(1); 4573 } 4574 4575 if (i >= adev->usec_timeout) { 4576 r = -EINVAL; 4577 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 4578 } 4579 4580 /* deassert preemption condition */ 4581 amdgpu_ring_set_preempt_cond_exec(ring, true); 4582 return r; 4583 } 4584 4585 static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, 4586 bool start, 4587 bool secure) 4588 { 4589 uint32_t v = secure ? FRAME_TMZ : 0; 4590 4591 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 4592 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 4593 } 4594 4595 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 4596 uint32_t reg_val_offs) 4597 { 4598 struct amdgpu_device *adev = ring->adev; 4599 4600 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4601 amdgpu_ring_write(ring, 0 | /* src: register*/ 4602 (5 << 8) | /* dst: memory */ 4603 (1 << 20)); /* write confirm */ 4604 amdgpu_ring_write(ring, reg); 4605 amdgpu_ring_write(ring, 0); 4606 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4607 reg_val_offs * 4)); 4608 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4609 reg_val_offs * 4)); 4610 } 4611 4612 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, 4613 uint32_t reg, 4614 uint32_t val) 4615 { 4616 uint32_t cmd = 0; 4617 4618 switch (ring->funcs->type) { 4619 case AMDGPU_RING_TYPE_GFX: 4620 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 4621 break; 4622 case AMDGPU_RING_TYPE_KIQ: 4623 cmd = (1 << 16); /* no inc addr */ 4624 break; 4625 default: 4626 cmd = WR_CONFIRM; 4627 break; 4628 } 4629 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4630 amdgpu_ring_write(ring, cmd); 4631 amdgpu_ring_write(ring, reg); 4632 amdgpu_ring_write(ring, 0); 4633 amdgpu_ring_write(ring, val); 4634 } 4635 4636 static void gfx_v12_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 4637 uint32_t val, uint32_t mask) 4638 { 4639 gfx_v12_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4640 } 4641 4642 static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 4643 uint32_t reg0, uint32_t reg1, 4644 uint32_t ref, uint32_t mask) 4645 { 4646 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4647 4648 gfx_v12_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 4649 ref, mask, 0x20); 4650 } 4651 4652 static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, 4653 unsigned vmid) 4654 { 4655 struct amdgpu_device *adev = ring->adev; 4656 uint32_t value = 0; 4657 4658 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 4659 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 4660 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 4661 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4662 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 4663 WREG32_SOC15(GC, 0, regSQ_CMD, value); 4664 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 4665 } 4666 4667 static void 4668 gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4669 uint32_t me, uint32_t pipe, 4670 enum amdgpu_interrupt_state state) 4671 { 4672 uint32_t cp_int_cntl, cp_int_cntl_reg; 4673 4674 if (!me) { 4675 switch (pipe) { 4676 case 0: 4677 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 4678 break; 4679 default: 4680 DRM_DEBUG("invalid pipe %d\n", pipe); 4681 return; 4682 } 4683 } else { 4684 DRM_DEBUG("invalid me %d\n", me); 4685 return; 4686 } 4687 4688 switch (state) { 4689 case AMDGPU_IRQ_STATE_DISABLE: 4690 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4691 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4692 TIME_STAMP_INT_ENABLE, 0); 4693 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4694 GENERIC0_INT_ENABLE, 0); 4695 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4696 break; 4697 case AMDGPU_IRQ_STATE_ENABLE: 4698 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4699 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4700 TIME_STAMP_INT_ENABLE, 1); 4701 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4702 GENERIC0_INT_ENABLE, 1); 4703 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4704 break; 4705 default: 4706 break; 4707 } 4708 } 4709 4710 static void gfx_v12_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4711 int me, int pipe, 4712 enum amdgpu_interrupt_state state) 4713 { 4714 u32 mec_int_cntl, mec_int_cntl_reg; 4715 4716 /* 4717 * amdgpu controls only the first MEC. That's why this function only 4718 * handles the setting of interrupts for this specific MEC. All other 4719 * pipes' interrupts are set by amdkfd. 4720 */ 4721 4722 if (me == 1) { 4723 switch (pipe) { 4724 case 0: 4725 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 4726 break; 4727 case 1: 4728 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 4729 break; 4730 default: 4731 DRM_DEBUG("invalid pipe %d\n", pipe); 4732 return; 4733 } 4734 } else { 4735 DRM_DEBUG("invalid me %d\n", me); 4736 return; 4737 } 4738 4739 switch (state) { 4740 case AMDGPU_IRQ_STATE_DISABLE: 4741 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 4742 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4743 TIME_STAMP_INT_ENABLE, 0); 4744 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4745 GENERIC0_INT_ENABLE, 0); 4746 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 4747 break; 4748 case AMDGPU_IRQ_STATE_ENABLE: 4749 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 4750 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4751 TIME_STAMP_INT_ENABLE, 1); 4752 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4753 GENERIC0_INT_ENABLE, 1); 4754 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 4755 break; 4756 default: 4757 break; 4758 } 4759 } 4760 4761 static int gfx_v12_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4762 struct amdgpu_irq_src *src, 4763 unsigned type, 4764 enum amdgpu_interrupt_state state) 4765 { 4766 switch (type) { 4767 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 4768 gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 4769 break; 4770 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 4771 gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 4772 break; 4773 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4774 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4775 break; 4776 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4777 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4778 break; 4779 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4780 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4781 break; 4782 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4783 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4784 break; 4785 default: 4786 break; 4787 } 4788 return 0; 4789 } 4790 4791 static int gfx_v12_0_eop_irq(struct amdgpu_device *adev, 4792 struct amdgpu_irq_src *source, 4793 struct amdgpu_iv_entry *entry) 4794 { 4795 int i; 4796 u8 me_id, pipe_id, queue_id; 4797 struct amdgpu_ring *ring; 4798 uint32_t mes_queue_id = entry->src_data[0]; 4799 4800 DRM_DEBUG("IH: CP EOP\n"); 4801 4802 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 4803 struct amdgpu_mes_queue *queue; 4804 4805 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 4806 4807 spin_lock(&adev->mes.queue_id_lock); 4808 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 4809 if (queue) { 4810 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 4811 amdgpu_fence_process(queue->ring); 4812 } 4813 spin_unlock(&adev->mes.queue_id_lock); 4814 } else { 4815 me_id = (entry->ring_id & 0x0c) >> 2; 4816 pipe_id = (entry->ring_id & 0x03) >> 0; 4817 queue_id = (entry->ring_id & 0x70) >> 4; 4818 4819 switch (me_id) { 4820 case 0: 4821 if (pipe_id == 0) 4822 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4823 else 4824 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 4825 break; 4826 case 1: 4827 case 2: 4828 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4829 ring = &adev->gfx.compute_ring[i]; 4830 /* Per-queue interrupt is supported for MEC starting from VI. 4831 * The interrupt can only be enabled/disabled per pipe instead 4832 * of per queue. 4833 */ 4834 if ((ring->me == me_id) && 4835 (ring->pipe == pipe_id) && 4836 (ring->queue == queue_id)) 4837 amdgpu_fence_process(ring); 4838 } 4839 break; 4840 } 4841 } 4842 4843 return 0; 4844 } 4845 4846 static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4847 struct amdgpu_irq_src *source, 4848 unsigned int type, 4849 enum amdgpu_interrupt_state state) 4850 { 4851 u32 cp_int_cntl_reg, cp_int_cntl; 4852 int i, j; 4853 4854 switch (state) { 4855 case AMDGPU_IRQ_STATE_DISABLE: 4856 case AMDGPU_IRQ_STATE_ENABLE: 4857 for (i = 0; i < adev->gfx.me.num_me; i++) { 4858 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 4859 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 4860 4861 if (cp_int_cntl_reg) { 4862 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4863 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4864 PRIV_REG_INT_ENABLE, 4865 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4866 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4867 } 4868 } 4869 } 4870 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4871 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4872 /* MECs start at 1 */ 4873 cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j); 4874 4875 if (cp_int_cntl_reg) { 4876 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4877 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4878 PRIV_REG_INT_ENABLE, 4879 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4880 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4881 } 4882 } 4883 } 4884 break; 4885 default: 4886 break; 4887 } 4888 4889 return 0; 4890 } 4891 4892 static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev, 4893 struct amdgpu_irq_src *source, 4894 unsigned type, 4895 enum amdgpu_interrupt_state state) 4896 { 4897 u32 cp_int_cntl_reg, cp_int_cntl; 4898 int i, j; 4899 4900 switch (state) { 4901 case AMDGPU_IRQ_STATE_DISABLE: 4902 case AMDGPU_IRQ_STATE_ENABLE: 4903 for (i = 0; i < adev->gfx.me.num_me; i++) { 4904 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 4905 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 4906 4907 if (cp_int_cntl_reg) { 4908 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4909 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4910 OPCODE_ERROR_INT_ENABLE, 4911 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4912 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4913 } 4914 } 4915 } 4916 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4917 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4918 /* MECs start at 1 */ 4919 cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j); 4920 4921 if (cp_int_cntl_reg) { 4922 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4923 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4924 OPCODE_ERROR_INT_ENABLE, 4925 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4926 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4927 } 4928 } 4929 } 4930 break; 4931 default: 4932 break; 4933 } 4934 return 0; 4935 } 4936 4937 static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4938 struct amdgpu_irq_src *source, 4939 unsigned int type, 4940 enum amdgpu_interrupt_state state) 4941 { 4942 u32 cp_int_cntl_reg, cp_int_cntl; 4943 int i, j; 4944 4945 switch (state) { 4946 case AMDGPU_IRQ_STATE_DISABLE: 4947 case AMDGPU_IRQ_STATE_ENABLE: 4948 for (i = 0; i < adev->gfx.me.num_me; i++) { 4949 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 4950 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 4951 4952 if (cp_int_cntl_reg) { 4953 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4954 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4955 PRIV_INSTR_INT_ENABLE, 4956 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4957 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4958 } 4959 } 4960 } 4961 break; 4962 default: 4963 break; 4964 } 4965 4966 return 0; 4967 } 4968 4969 static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev, 4970 struct amdgpu_iv_entry *entry) 4971 { 4972 u8 me_id, pipe_id, queue_id; 4973 struct amdgpu_ring *ring; 4974 int i; 4975 4976 me_id = (entry->ring_id & 0x0c) >> 2; 4977 pipe_id = (entry->ring_id & 0x03) >> 0; 4978 queue_id = (entry->ring_id & 0x70) >> 4; 4979 4980 switch (me_id) { 4981 case 0: 4982 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4983 ring = &adev->gfx.gfx_ring[i]; 4984 if (ring->me == me_id && ring->pipe == pipe_id && 4985 ring->queue == queue_id) 4986 drm_sched_fault(&ring->sched); 4987 } 4988 break; 4989 case 1: 4990 case 2: 4991 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4992 ring = &adev->gfx.compute_ring[i]; 4993 if (ring->me == me_id && ring->pipe == pipe_id && 4994 ring->queue == queue_id) 4995 drm_sched_fault(&ring->sched); 4996 } 4997 break; 4998 default: 4999 BUG(); 5000 break; 5001 } 5002 } 5003 5004 static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev, 5005 struct amdgpu_irq_src *source, 5006 struct amdgpu_iv_entry *entry) 5007 { 5008 DRM_ERROR("Illegal register access in command stream\n"); 5009 gfx_v12_0_handle_priv_fault(adev, entry); 5010 return 0; 5011 } 5012 5013 static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev, 5014 struct amdgpu_irq_src *source, 5015 struct amdgpu_iv_entry *entry) 5016 { 5017 DRM_ERROR("Illegal opcode in command stream \n"); 5018 gfx_v12_0_handle_priv_fault(adev, entry); 5019 return 0; 5020 } 5021 5022 static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev, 5023 struct amdgpu_irq_src *source, 5024 struct amdgpu_iv_entry *entry) 5025 { 5026 DRM_ERROR("Illegal instruction in command stream\n"); 5027 gfx_v12_0_handle_priv_fault(adev, entry); 5028 return 0; 5029 } 5030 5031 static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) 5032 { 5033 const unsigned int gcr_cntl = 5034 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 5035 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 5036 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 5037 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 5038 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 5039 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 5040 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 5041 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 5042 5043 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 5044 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 5045 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 5046 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 5047 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 5048 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 5049 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 5050 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 5051 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 5052 } 5053 5054 static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) 5055 { 5056 /* Header itself is a NOP packet */ 5057 if (num_nop == 1) { 5058 amdgpu_ring_write(ring, ring->funcs->nop); 5059 return; 5060 } 5061 5062 /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ 5063 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); 5064 5065 /* Header is at index 0, followed by num_nops - 1 NOP packet's */ 5066 amdgpu_ring_insert_nop(ring, num_nop - 1); 5067 } 5068 5069 static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 5070 { 5071 /* Emit the cleaner shader */ 5072 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 5073 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 5074 } 5075 5076 static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) 5077 { 5078 struct amdgpu_device *adev = ip_block->adev; 5079 uint32_t i, j, k, reg, index = 0; 5080 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); 5081 5082 if (!adev->gfx.ip_dump_core) 5083 return; 5084 5085 for (i = 0; i < reg_count; i++) 5086 drm_printf(p, "%-50s \t 0x%08x\n", 5087 gc_reg_list_12_0[i].reg_name, 5088 adev->gfx.ip_dump_core[i]); 5089 5090 /* print compute queue registers for all instances */ 5091 if (!adev->gfx.ip_dump_compute_queues) 5092 return; 5093 5094 reg_count = ARRAY_SIZE(gc_cp_reg_list_12); 5095 drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n", 5096 adev->gfx.mec.num_mec, 5097 adev->gfx.mec.num_pipe_per_mec, 5098 adev->gfx.mec.num_queue_per_pipe); 5099 5100 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 5101 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 5102 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 5103 drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k); 5104 for (reg = 0; reg < reg_count; reg++) { 5105 drm_printf(p, "%-50s \t 0x%08x\n", 5106 gc_cp_reg_list_12[reg].reg_name, 5107 adev->gfx.ip_dump_compute_queues[index + reg]); 5108 } 5109 index += reg_count; 5110 } 5111 } 5112 } 5113 5114 /* print gfx queue registers for all instances */ 5115 if (!adev->gfx.ip_dump_gfx_queues) 5116 return; 5117 5118 index = 0; 5119 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 5120 drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n", 5121 adev->gfx.me.num_me, 5122 adev->gfx.me.num_pipe_per_me, 5123 adev->gfx.me.num_queue_per_pipe); 5124 5125 for (i = 0; i < adev->gfx.me.num_me; i++) { 5126 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 5127 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 5128 drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k); 5129 for (reg = 0; reg < reg_count; reg++) { 5130 drm_printf(p, "%-50s \t 0x%08x\n", 5131 gc_gfx_queue_reg_list_12[reg].reg_name, 5132 adev->gfx.ip_dump_gfx_queues[index + reg]); 5133 } 5134 index += reg_count; 5135 } 5136 } 5137 } 5138 } 5139 5140 static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block) 5141 { 5142 struct amdgpu_device *adev = ip_block->adev; 5143 uint32_t i, j, k, reg, index = 0; 5144 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); 5145 5146 if (!adev->gfx.ip_dump_core) 5147 return; 5148 5149 amdgpu_gfx_off_ctrl(adev, false); 5150 for (i = 0; i < reg_count; i++) 5151 adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i])); 5152 amdgpu_gfx_off_ctrl(adev, true); 5153 5154 /* dump compute queue registers for all instances */ 5155 if (!adev->gfx.ip_dump_compute_queues) 5156 return; 5157 5158 reg_count = ARRAY_SIZE(gc_cp_reg_list_12); 5159 amdgpu_gfx_off_ctrl(adev, false); 5160 mutex_lock(&adev->srbm_mutex); 5161 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 5162 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 5163 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 5164 /* ME0 is for GFX so start from 1 for CP */ 5165 soc24_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); 5166 for (reg = 0; reg < reg_count; reg++) { 5167 adev->gfx.ip_dump_compute_queues[index + reg] = 5168 RREG32(SOC15_REG_ENTRY_OFFSET( 5169 gc_cp_reg_list_12[reg])); 5170 } 5171 index += reg_count; 5172 } 5173 } 5174 } 5175 soc24_grbm_select(adev, 0, 0, 0, 0); 5176 mutex_unlock(&adev->srbm_mutex); 5177 amdgpu_gfx_off_ctrl(adev, true); 5178 5179 /* dump gfx queue registers for all instances */ 5180 if (!adev->gfx.ip_dump_gfx_queues) 5181 return; 5182 5183 index = 0; 5184 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 5185 amdgpu_gfx_off_ctrl(adev, false); 5186 mutex_lock(&adev->srbm_mutex); 5187 for (i = 0; i < adev->gfx.me.num_me; i++) { 5188 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 5189 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 5190 soc24_grbm_select(adev, i, j, k, 0); 5191 5192 for (reg = 0; reg < reg_count; reg++) { 5193 adev->gfx.ip_dump_gfx_queues[index + reg] = 5194 RREG32(SOC15_REG_ENTRY_OFFSET( 5195 gc_gfx_queue_reg_list_12[reg])); 5196 } 5197 index += reg_count; 5198 } 5199 } 5200 } 5201 soc24_grbm_select(adev, 0, 0, 0, 0); 5202 mutex_unlock(&adev->srbm_mutex); 5203 amdgpu_gfx_off_ctrl(adev, true); 5204 } 5205 5206 static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) 5207 { 5208 struct amdgpu_device *adev = ring->adev; 5209 int r; 5210 5211 if (amdgpu_sriov_vf(adev)) 5212 return -EINVAL; 5213 5214 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); 5215 if (r) { 5216 dev_err(adev->dev, "reset via MES failed %d\n", r); 5217 return r; 5218 } 5219 5220 r = amdgpu_bo_reserve(ring->mqd_obj, false); 5221 if (unlikely(r != 0)) { 5222 dev_err(adev->dev, "fail to resv mqd_obj\n"); 5223 return r; 5224 } 5225 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5226 if (!r) { 5227 r = gfx_v12_0_kgq_init_queue(ring, true); 5228 amdgpu_bo_kunmap(ring->mqd_obj); 5229 ring->mqd_ptr = NULL; 5230 } 5231 amdgpu_bo_unreserve(ring->mqd_obj); 5232 if (r) { 5233 DRM_ERROR("fail to unresv mqd_obj\n"); 5234 return r; 5235 } 5236 5237 r = amdgpu_mes_map_legacy_queue(adev, ring); 5238 if (r) { 5239 dev_err(adev->dev, "failed to remap kgq\n"); 5240 return r; 5241 } 5242 5243 return amdgpu_ring_test_ring(ring); 5244 } 5245 5246 static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) 5247 { 5248 struct amdgpu_device *adev = ring->adev; 5249 int r; 5250 5251 if (amdgpu_sriov_vf(adev)) 5252 return -EINVAL; 5253 5254 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); 5255 if (r) { 5256 dev_err(adev->dev, "reset via MMIO failed %d\n", r); 5257 return r; 5258 } 5259 5260 r = amdgpu_bo_reserve(ring->mqd_obj, false); 5261 if (unlikely(r != 0)) { 5262 DRM_ERROR("fail to resv mqd_obj\n"); 5263 return r; 5264 } 5265 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5266 if (!r) { 5267 r = gfx_v12_0_kcq_init_queue(ring, true); 5268 amdgpu_bo_kunmap(ring->mqd_obj); 5269 ring->mqd_ptr = NULL; 5270 } 5271 amdgpu_bo_unreserve(ring->mqd_obj); 5272 if (r) { 5273 DRM_ERROR("fail to unresv mqd_obj\n"); 5274 return r; 5275 } 5276 r = amdgpu_mes_map_legacy_queue(adev, ring); 5277 if (r) { 5278 dev_err(adev->dev, "failed to remap kcq\n"); 5279 return r; 5280 } 5281 5282 return amdgpu_ring_test_ring(ring); 5283 } 5284 5285 static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring) 5286 { 5287 amdgpu_gfx_profile_ring_begin_use(ring); 5288 5289 amdgpu_gfx_enforce_isolation_ring_begin_use(ring); 5290 } 5291 5292 static void gfx_v12_0_ring_end_use(struct amdgpu_ring *ring) 5293 { 5294 amdgpu_gfx_profile_ring_end_use(ring); 5295 5296 amdgpu_gfx_enforce_isolation_ring_end_use(ring); 5297 } 5298 5299 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = { 5300 .name = "gfx_v12_0", 5301 .early_init = gfx_v12_0_early_init, 5302 .late_init = gfx_v12_0_late_init, 5303 .sw_init = gfx_v12_0_sw_init, 5304 .sw_fini = gfx_v12_0_sw_fini, 5305 .hw_init = gfx_v12_0_hw_init, 5306 .hw_fini = gfx_v12_0_hw_fini, 5307 .suspend = gfx_v12_0_suspend, 5308 .resume = gfx_v12_0_resume, 5309 .is_idle = gfx_v12_0_is_idle, 5310 .wait_for_idle = gfx_v12_0_wait_for_idle, 5311 .set_clockgating_state = gfx_v12_0_set_clockgating_state, 5312 .set_powergating_state = gfx_v12_0_set_powergating_state, 5313 .get_clockgating_state = gfx_v12_0_get_clockgating_state, 5314 .dump_ip_state = gfx_v12_ip_dump, 5315 .print_ip_state = gfx_v12_ip_print, 5316 }; 5317 5318 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 5319 .type = AMDGPU_RING_TYPE_GFX, 5320 .align_mask = 0xff, 5321 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5322 .support_64bit_ptrs = true, 5323 .secure_submission_supported = true, 5324 .get_rptr = gfx_v12_0_ring_get_rptr_gfx, 5325 .get_wptr = gfx_v12_0_ring_get_wptr_gfx, 5326 .set_wptr = gfx_v12_0_ring_set_wptr_gfx, 5327 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 5328 5 + /* COND_EXEC */ 5329 7 + /* PIPELINE_SYNC */ 5330 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5331 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5332 2 + /* VM_FLUSH */ 5333 8 + /* FENCE for VM_FLUSH */ 5334 5 + /* COND_EXEC */ 5335 7 + /* HDP_flush */ 5336 4 + /* VGT_flush */ 5337 31 + /* DE_META */ 5338 3 + /* CNTX_CTRL */ 5339 5 + /* HDP_INVL */ 5340 8 + 8 + /* FENCE x2 */ 5341 8 + /* gfx_v12_0_emit_mem_sync */ 5342 2, /* gfx_v12_0_ring_emit_cleaner_shader */ 5343 .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ 5344 .emit_ib = gfx_v12_0_ring_emit_ib_gfx, 5345 .emit_fence = gfx_v12_0_ring_emit_fence, 5346 .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 5347 .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 5348 .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 5349 .test_ring = gfx_v12_0_ring_test_ring, 5350 .test_ib = gfx_v12_0_ring_test_ib, 5351 .insert_nop = gfx_v12_ring_insert_nop, 5352 .pad_ib = amdgpu_ring_generic_pad_ib, 5353 .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, 5354 .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, 5355 .preempt_ib = gfx_v12_0_ring_preempt_ib, 5356 .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl, 5357 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5358 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5359 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5360 .soft_recovery = gfx_v12_0_ring_soft_recovery, 5361 .emit_mem_sync = gfx_v12_0_emit_mem_sync, 5362 .reset = gfx_v12_0_reset_kgq, 5363 .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, 5364 .begin_use = gfx_v12_0_ring_begin_use, 5365 .end_use = gfx_v12_0_ring_end_use, 5366 }; 5367 5368 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { 5369 .type = AMDGPU_RING_TYPE_COMPUTE, 5370 .align_mask = 0xff, 5371 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5372 .support_64bit_ptrs = true, 5373 .get_rptr = gfx_v12_0_ring_get_rptr_compute, 5374 .get_wptr = gfx_v12_0_ring_get_wptr_compute, 5375 .set_wptr = gfx_v12_0_ring_set_wptr_compute, 5376 .emit_frame_size = 5377 7 + /* gfx_v12_0_ring_emit_hdp_flush */ 5378 5 + /* hdp invalidate */ 5379 7 + /* gfx_v12_0_ring_emit_pipeline_sync */ 5380 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5381 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5382 2 + /* gfx_v12_0_ring_emit_vm_flush */ 5383 8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */ 5384 8 + /* gfx_v12_0_emit_mem_sync */ 5385 2, /* gfx_v12_0_ring_emit_cleaner_shader */ 5386 .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ 5387 .emit_ib = gfx_v12_0_ring_emit_ib_compute, 5388 .emit_fence = gfx_v12_0_ring_emit_fence, 5389 .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 5390 .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 5391 .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 5392 .test_ring = gfx_v12_0_ring_test_ring, 5393 .test_ib = gfx_v12_0_ring_test_ib, 5394 .insert_nop = gfx_v12_ring_insert_nop, 5395 .pad_ib = amdgpu_ring_generic_pad_ib, 5396 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5397 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5398 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5399 .soft_recovery = gfx_v12_0_ring_soft_recovery, 5400 .emit_mem_sync = gfx_v12_0_emit_mem_sync, 5401 .reset = gfx_v12_0_reset_kcq, 5402 .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, 5403 .begin_use = gfx_v12_0_ring_begin_use, 5404 .end_use = gfx_v12_0_ring_end_use, 5405 }; 5406 5407 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { 5408 .type = AMDGPU_RING_TYPE_KIQ, 5409 .align_mask = 0xff, 5410 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5411 .support_64bit_ptrs = true, 5412 .get_rptr = gfx_v12_0_ring_get_rptr_compute, 5413 .get_wptr = gfx_v12_0_ring_get_wptr_compute, 5414 .set_wptr = gfx_v12_0_ring_set_wptr_compute, 5415 .emit_frame_size = 5416 7 + /* gfx_v12_0_ring_emit_hdp_flush */ 5417 5 + /*hdp invalidate */ 5418 7 + /* gfx_v12_0_ring_emit_pipeline_sync */ 5419 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5420 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5421 2 + /* gfx_v12_0_ring_emit_vm_flush */ 5422 8 + 8 + 8, /* gfx_v12_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 5423 .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ 5424 .emit_ib = gfx_v12_0_ring_emit_ib_compute, 5425 .emit_fence = gfx_v12_0_ring_emit_fence_kiq, 5426 .test_ring = gfx_v12_0_ring_test_ring, 5427 .test_ib = gfx_v12_0_ring_test_ib, 5428 .insert_nop = amdgpu_ring_insert_nop, 5429 .pad_ib = amdgpu_ring_generic_pad_ib, 5430 .emit_rreg = gfx_v12_0_ring_emit_rreg, 5431 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5432 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5433 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5434 }; 5435 5436 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev) 5437 { 5438 int i; 5439 5440 adev->gfx.kiq[0].ring.funcs = &gfx_v12_0_ring_funcs_kiq; 5441 5442 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 5443 adev->gfx.gfx_ring[i].funcs = &gfx_v12_0_ring_funcs_gfx; 5444 5445 for (i = 0; i < adev->gfx.num_compute_rings; i++) 5446 adev->gfx.compute_ring[i].funcs = &gfx_v12_0_ring_funcs_compute; 5447 } 5448 5449 static const struct amdgpu_irq_src_funcs gfx_v12_0_eop_irq_funcs = { 5450 .set = gfx_v12_0_set_eop_interrupt_state, 5451 .process = gfx_v12_0_eop_irq, 5452 }; 5453 5454 static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = { 5455 .set = gfx_v12_0_set_priv_reg_fault_state, 5456 .process = gfx_v12_0_priv_reg_irq, 5457 }; 5458 5459 static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = { 5460 .set = gfx_v12_0_set_bad_op_fault_state, 5461 .process = gfx_v12_0_bad_op_irq, 5462 }; 5463 5464 static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = { 5465 .set = gfx_v12_0_set_priv_inst_fault_state, 5466 .process = gfx_v12_0_priv_inst_irq, 5467 }; 5468 5469 static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev) 5470 { 5471 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 5472 adev->gfx.eop_irq.funcs = &gfx_v12_0_eop_irq_funcs; 5473 5474 adev->gfx.priv_reg_irq.num_types = 1; 5475 adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs; 5476 5477 adev->gfx.bad_op_irq.num_types = 1; 5478 adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs; 5479 5480 adev->gfx.priv_inst_irq.num_types = 1; 5481 adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs; 5482 } 5483 5484 static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev) 5485 { 5486 if (adev->flags & AMD_IS_APU) 5487 adev->gfx.imu.mode = MISSION_MODE; 5488 else 5489 adev->gfx.imu.mode = DEBUG_MODE; 5490 5491 adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs; 5492 } 5493 5494 static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev) 5495 { 5496 adev->gfx.rlc.funcs = &gfx_v12_0_rlc_funcs; 5497 } 5498 5499 static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev) 5500 { 5501 /* set gfx eng mqd */ 5502 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 5503 sizeof(struct v12_gfx_mqd); 5504 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 5505 gfx_v12_0_gfx_mqd_init; 5506 /* set compute eng mqd */ 5507 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 5508 sizeof(struct v12_compute_mqd); 5509 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 5510 gfx_v12_0_compute_mqd_init; 5511 } 5512 5513 static void gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 5514 u32 bitmap) 5515 { 5516 u32 data; 5517 5518 if (!bitmap) 5519 return; 5520 5521 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 5522 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 5523 5524 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 5525 } 5526 5527 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 5528 { 5529 u32 data, wgp_bitmask; 5530 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 5531 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 5532 5533 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 5534 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 5535 5536 wgp_bitmask = 5537 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 5538 5539 return (~data) & wgp_bitmask; 5540 } 5541 5542 static u32 gfx_v12_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 5543 { 5544 u32 wgp_idx, wgp_active_bitmap; 5545 u32 cu_bitmap_per_wgp, cu_active_bitmap; 5546 5547 wgp_active_bitmap = gfx_v12_0_get_wgp_active_bitmap_per_sh(adev); 5548 cu_active_bitmap = 0; 5549 5550 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 5551 /* if there is one WGP enabled, it means 2 CUs will be enabled */ 5552 cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 5553 if (wgp_active_bitmap & (1 << wgp_idx)) 5554 cu_active_bitmap |= cu_bitmap_per_wgp; 5555 } 5556 5557 return cu_active_bitmap; 5558 } 5559 5560 static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev, 5561 struct amdgpu_cu_info *cu_info) 5562 { 5563 int i, j, k, counter, active_cu_number = 0; 5564 u32 mask, bitmap; 5565 unsigned disable_masks[8 * 2]; 5566 5567 if (!adev || !cu_info) 5568 return -EINVAL; 5569 5570 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 5571 5572 mutex_lock(&adev->grbm_idx_mutex); 5573 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 5574 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 5575 bitmap = i * adev->gfx.config.max_sh_per_se + j; 5576 if (!((gfx_v12_0_get_sa_active_bitmap(adev) >> bitmap) & 1)) 5577 continue; 5578 mask = 1; 5579 counter = 0; 5580 gfx_v12_0_select_se_sh(adev, i, j, 0xffffffff, 0); 5581 if (i < 8 && j < 2) 5582 gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh( 5583 adev, disable_masks[i * 2 + j]); 5584 bitmap = gfx_v12_0_get_cu_active_bitmap_per_sh(adev); 5585 5586 /** 5587 * GFX12 could support more than 4 SEs, while the bitmap 5588 * in cu_info struct is 4x4 and ioctl interface struct 5589 * drm_amdgpu_info_device should keep stable. 5590 * So we use last two columns of bitmap to store cu mask for 5591 * SEs 4 to 7, the layout of the bitmap is as below: 5592 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 5593 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 5594 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 5595 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 5596 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 5597 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 5598 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 5599 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 5600 */ 5601 cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap; 5602 5603 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 5604 if (bitmap & mask) 5605 counter++; 5606 5607 mask <<= 1; 5608 } 5609 active_cu_number += counter; 5610 } 5611 } 5612 gfx_v12_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 5613 mutex_unlock(&adev->grbm_idx_mutex); 5614 5615 cu_info->number = active_cu_number; 5616 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 5617 5618 return 0; 5619 } 5620 5621 const struct amdgpu_ip_block_version gfx_v12_0_ip_block = { 5622 .type = AMD_IP_BLOCK_TYPE_GFX, 5623 .major = 12, 5624 .minor = 0, 5625 .rev = 0, 5626 .funcs = &gfx_v12_0_ip_funcs, 5627 }; 5628