/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v12_0.h"
#include "soc24.h"
#include "nvd.h"

#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "soc24_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"

#define GFX12_NUM_GFX_RINGS	1
#define GFX12_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");

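/*
 * Status/debug registers snapshotted by the GFX IP dump (allocated in
 * gfx_v12_0_alloc_ip_dump(), e.g. for devcoredump on GPU hangs).
 */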
static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_LO32),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_HI32),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),

	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

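/*
 * Per-queue compute HQD registers; the IP dump keeps one copy of this
 * list for every MEC queue instance (num_mec * pipes * queues).
 */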
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
	/* compute registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
};

static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
	/* gfx queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};

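/*
 * Default SH_MEM_CONFIG: 64-bit address mode, unaligned access
 * permitted, initial instruction prefetch depth of 3.
 */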
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

static void gfx_v12_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);
}

static void gfx_v12_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				     struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

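/*
 * For PREEMPT_QUEUES_NO_UNMAP the trailing three dwords carry the
 * fence address/sequence the CP writes back on preemption; for plain
 * unmap actions they are zero padding.
 */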
static void gfx_v12_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       enum amdgpu_unmap_queues_action action,
				       u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v12_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v12_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					  uint16_t pasid,
					  uint32_t flush_type,
					  bool all_hub)
{
	gfx_v12_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v12_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v12_0_kiq_set_resources,
	.kiq_map_queues = gfx_v12_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v12_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v12_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v12_0_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v12_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs;
}

static void gfx_v12_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref,
				   uint32_t mask, uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

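/*
 * Basic ring liveness test: seed SCRATCH_REG0 with 0xCAFEDEAD, have the
 * ring rewrite it to 0xDEADBEEF (the KIQ uses a register-write packet
 * since it cannot execute SET_UCONFIG_REG), then poll until the value
 * lands or adev->usec_timeout expires.
 */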
static int gfx_v12_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		dev_err(adev->dev,
			"amdgpu: cp failed to lock ring %d (%d).\n",
			ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v12_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't have indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v12_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

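/*
 * The TOC firmware is only needed for the RLC backdoor autoload path;
 * it describes the offset/size of every image staged in the autoload
 * buffer (parsed in gfx_v12_0_parse_rlc_toc()).
 */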
static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				   le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   "amdgpu/%s_pfp.bin", ucode_prefix);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   "amdgpu/%s_me.bin", ucode_prefix);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);

	if (!amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
					   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v12_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 12 */
	adev->gfx.mec2_fw = NULL;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			err = adev->gfx.imu.funcs->init_microcode(adev);
			if (err)
				dev_err(adev->dev, "Failed to load imu firmware!\n");
		}
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	count += 1;

	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext)
				count += 2 + ext->reg_count;
		} else
			return 0;
	}

	return count;
}

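/*
 * Clear-state buffer layout: buffer[0] holds the cluster count,
 * followed by (reg_count, reg_index, values...) runs for each
 * SECT_CONTEXT extent; gfx_v12_0_get_csb_size() computes the matching
 * dword count.
 */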
static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, clustercount = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	count += 1;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext) {
				clustercount++;
				buffer[count++] = ext->reg_count;
				buffer[count++] = ext->reg_index;

				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		} else
			return;
	}

	buffer[0] = clustercount;
}

static void gfx_v12_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v12_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v12_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx12_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v12_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v12_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

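/*
 * Reserve one GFX12_MEC_HPD_SIZE (2KB) EOP buffer per enabled compute
 * ring; the backing BO lives in GTT and is zeroed before the queues
 * are brought up.
 */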
static int gfx_v12_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX12_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v12_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

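/*
 * Wave state is read through the SQ_IND_INDEX/SQ_IND_DATA indirect
 * window (AUTO_INCR for bulk GPR reads). The leading "4" below tags
 * the wave-data layout version for consumers of the dump.
 */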
static void gfx_v12_0_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id,
				     uint32_t simd, uint32_t wave,
				     uint32_t *dst, int *no_fields)
{
	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 4 wave data */
	dst[(*no_fields)++] = 4;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATE_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_USER);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAP_CTRL);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_ACTIVE);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_VALID_AND_IDLE);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_SCHED_MODE);
}

static void gfx_v12_0_read_wave_sgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v12_0_read_wave_vgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc24_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v12_0_select_se_sh,
	.read_wave_data = &gfx_v12_0_read_wave_data,
	.read_wave_sgprs = &gfx_v12_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
};

static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v12_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}

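/*
 * adev->doorbell_index values are allocated in 64-bit doorbell units;
 * the << 1 converts them to the 32-bit doorbell offsets the CP expects
 * (same convention as the gfx ring setup above).
 */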
static int gfx_v12_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX12_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
		  AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

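/*
 * Offset/size of each firmware image inside the RLC autoload buffer,
 * indexed by SOC24_FIRMWARE_ID and filled from the TOC firmware by
 * gfx_v12_0_parse_rlc_toc().
 */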
static struct {
	SOC24_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		size_x16;
} rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];

#define RLC_TOC_OFFSET_DWUNIT	8
#define RLC_SIZE_MULTIPLE	1024
#define RLC_TOC_UMF_SIZE_inM	23ULL
#define RLC_TOC_FORMAT_API	165ULL

static void gfx_v12_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset =
			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
		rlc_autoload_info[ucode->id].size =
			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
					  ucode->size * 4;
		ucode++;
	}
}

static uint32_t gfx_v12_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC24_FIRMWARE_ID id;

	gfx_v12_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX - 1].offset)
		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX - 1].offset +
			     rlc_autoload_info[SOC24_FIRMWARE_ID_MAX - 1].size;
	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
		total_size = RLC_TOC_UMF_SIZE_inM << 20;

	return total_size;
}

static int gfx_v12_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v12_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v12_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC24_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
}

static void
gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;
	uint32_t *toc_ptr;

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint32_t *)data + size / 4 - 2;
	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;

	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
						   data, size);
}

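/*
 * RS64 CP firmware ships as separate instruction and data images; the
 * data image is replicated into each pipe's stack slot (P0/P1 for
 * PFP/ME, P0-P3 for MEC).
 */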
static void
gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	/* pfp ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;
	/* instruction */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P1_STACK,
						   fw_data, fw_size);
	/* me ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	/* instruction */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P1_STACK,
						   fw_data, fw_size);
	/* mec ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	/* instruction */
	fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
						   fw_data, fw_size);

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
		le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 1) {
			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
								   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
								   fw_data, fw_size);
		}
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
								   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
								   fw_data, fw_size);
		}
	}
}

static void
gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v3_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data +
		le32_to_cpu(sdma_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);

	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
						   fw_data, fw_size);
}

static void
gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
			data_id = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
			data_id = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
			le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
			le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
	}
}

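/*
 * RLC backdoor autoload: stage all firmware images into the autoload
 * buffer, point the IMU bootloader registers at the RLC_G image, then
 * either let the IMU drive the load or unhalt the RLC directly when
 * running without IMU firmware.
 */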
static int gfx_v12_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t data;

	/* RLC autoload sequence 2: copy ucode */
	gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
	gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(adev);
	gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(adev);

	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
		/* RLC autoload sequence 3: load IMU fw */
		if (adev->gfx.imu.funcs->load_microcode)
			adev->gfx.imu.funcs->load_microcode(adev);
		/* RLC autoload sequence 4: init IMU fw */
		if (adev->gfx.imu.funcs->setup_imu)
			adev->gfx.imu.funcs->setup_imu(adev);
		if (adev->gfx.imu.funcs->start_imu)
			adev->gfx.imu.funcs->start_imu(adev);

		/* RLC autoload sequence 5: disable gpa mode */
		gfx_v12_0_disable_gpa_mode(adev);
	} else {
		/* unhalt rlc to start autoload without imu */
		data = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
		WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, data);
		WREG32_SOC15(GC, 0, regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
	}

	return 0;
}

static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
	uint32_t *ptr;
	uint32_t inst;

	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (ptr == NULL) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
	if (ptr == NULL) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}

	/* Allocate memory for gfx queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
		adev->gfx.me.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
	if (ptr == NULL) {
		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
		adev->gfx.ip_dump_gfx_queues = NULL;
	} else {
		adev->gfx.ip_dump_gfx_queues = ptr;
	}
}

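/*
 * Note: gfx12 reuses the gfx11 interrupt client/source IDs (hence the
 * irqsrcs_gfx_11_0_0.h include above), so the SOC21/GFX_11 names in
 * sw_init are intentional.
 */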
static int gfx_v12_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	unsigned num_compute_rings;
	int xcc_id = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 2;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* recalculate compute rings to use based on hardware configuration */
	num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
			     adev->gfx.mec.num_queue_per_pipe) / 2;
	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
					  num_compute_rings);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v12_0_me_init(adev);

	r = gfx_v12_0_rlc_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v12_0_mec_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v12_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev,
								     0, i, k, j))
					continue;

				r = gfx_v12_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
		if (r) {
			dev_err(adev->dev, "Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v12_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v12_0_gpu_early_init(adev);
	if (r)
		return r;

	gfx_v12_0_alloc_ip_dump(adev);

	return 0;
}

static void gfx_v12_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v12_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v12_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	gfx_v12_0_pfp_fini(adev);
	gfx_v12_0_me_fini(adev);
	gfx_v12_0_rlc_fini(adev);
	gfx_v12_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_0_rlc_autoload_buffer_fini(adev);

	gfx_v12_0_free_microcode(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);
	kfree(adev->gfx.ip_dump_gfx_queues);

	return 0;
}

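/*
 * Route indexed register accesses to a specific SE/SA/instance via
 * GRBM_GFX_INDEX; 0xffffffff for any argument selects broadcast
 * writes to all units of that type.
 */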
static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}

static u32 gfx_v12_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_CC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    GRBM_CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_GC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GRBM_GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

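/*
 * Derive the active RB mask from the active-SA mask (two RB bits per
 * SA here) and merge it with the fuse-derived RB mask read from the
 * BACKEND_DISABLE registers above.
 */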
static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v12_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v12_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

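/*
 * VMIDs first_kfd_vmid..AMDGPU_NUM_VMID-1 are owned by KFD; program
 * their fixed LDS/scratch aperture bases (layout below) and enable
 * the per-VMID trap once here.
 */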
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v12_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:     0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:   0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		       SCRATCH_APP_BASE;
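
	/*
	 * Resulting encoding, assuming the usual SH_MEM_BASES layout of
	 * PRIVATE_BASE in the low 16 bits and SHARED_BASE in the high 16
	 * bits: SHARED_BASE = LDS_APP_BASE (0x1) and PRIVATE_BASE =
	 * SCRATCH_APP_BASE (0x2) select the 4GB LDS and scratch aperture
	 * slots listed in the comment above.
	 */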
	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc24_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v12_0_get_tcc_info(struct amdgpu_device *adev)
{
}

static void gfx_v12_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v12_0_setup_rb(adev);
	gfx_v12_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v12_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc24_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v12_0_init_compute_vmid(adev);
}

static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev,
				      int me, int pipe)
{
	if (me != 0)
		return 0;

	switch (pipe) {
	case 0:
		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
	default:
		return 0;
	}
}

static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev,
				      int me, int pipe)
{
	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me != 1)
		return 0;

	switch (pipe) {
	case 0:
		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	case 1:
		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
	default:
		return 0;
	}
}

static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp, cp_int_cntl_reg;
	int i, j;

	if (amdgpu_sriov_vf(adev))
		return;

	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);

			if (cp_int_cntl_reg) {
				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
						    enable ? 1 : 0);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
						    enable ? 1 : 0);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
						    enable ? 1 : 0);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
						    enable ? 1 : 0);
				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
			}
		}
	}
}

static int gfx_v12_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}

static void gfx_v12_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
}

static void gfx_v12_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v12_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v12_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: keep the RLC/SMU handshake disabled until the SMU and
	 * GFXOFF features work as expected.
	 */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v12_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v12_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
}

static void gfx_v12_0_load_rlcg_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
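
	/*
	 * Backdoor load sequence: point the GPM ucode address register at
	 * the load start offset, stream the image one dword at a time
	 * through the data register (the address auto-increments), then
	 * write the firmware version to the address register as the final
	 * marker.
	 */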
	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
}

static void gfx_v12_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
}

static int gfx_v12_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	uint16_t version_major;
	uint16_t version_minor;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);

	if (version_major == 2) {
		gfx_v12_0_load_rlcg_microcode(adev);
		if (amdgpu_dpm == 1) {
			if (version_minor >= 2)
				gfx_v12_0_load_rlc_iram_dram_microcode(adev);
		}

		return 0;
	}

	return -EINVAL;
}

static int gfx_v12_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		gfx_v12_0_init_csb(adev);

		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
			gfx_v12_0_rlc_enable_srm(adev);
	} else {
		if (amdgpu_sriov_vf(adev)) {
			gfx_v12_0_init_csb(adev);
			return 0;
		}

		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v12_0_rlc_load_microcode(adev);
			if (r)
				return r;
		}

		gfx_v12_0_init_csb(adev);

		adev->gfx.rlc.funcs->start(adev);
	}

	return 0;
}

static void gfx_v12_0_config_gfx_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	uint32_t pipe_id, tmp;

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;
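
	/*
	 * The PRGRM_CNTR_START register pair encodes the ucode entry point
	 * in dword units: the low register takes ucode_start_addr_lo >> 2
	 * with the two low bits of ucode_start_addr_hi shifted into its top
	 * bits (hi << 30), and the _HI register takes the remaining bits
	 * (hi >> 2). The same packing recurs for PFP, ME and MEC below.
	 */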
	/* config pfp program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	/* reset pfp pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear pfp pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config me program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	/* reset me pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear me pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config mec program start addr */
	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
		soc24_grbm_select(adev, 1, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	/* reset mec pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);

	/* clear mec pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}

static void gfx_v12_0_set_pfp_ucode_start_addr(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *cp_hdr;
	unsigned pipe_id, tmp;

	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;
	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (cp_hdr->ucode_start_addr_hi << 30) |
			     (cp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     cp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given pipe so that
		 * CP_PFP_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear the pfp pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_0_set_me_ucode_start_addr(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *cp_hdr;
	unsigned pipe_id, tmp;

	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (cp_hdr->ucode_start_addr_hi << 30) |
			     (cp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     cp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given pipe so that
		 * CP_ME_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear the me pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_0_set_mec_ucode_start_addr(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *cp_hdr;
	unsigned pipe_id;

	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
		soc24_grbm_select(adev, 1, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     cp_hdr->ucode_start_addr_lo >> 2 |
			     cp_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     cp_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i;
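
	/*
	 * Poll until the CP reports fully idle (CP_STAT == 0) and the RLC
	 * flags BOOTLOAD_COMPLETE. On emulation each iteration also sleeps
	 * 10 ms, so the bound is a number of iterations rather than strict
	 * microseconds there.
	 */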
	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
		bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);

		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
				   RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
		if (amdgpu_emu_mode)
			msleep(10);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		gfx_v12_0_set_pfp_ucode_start_addr(adev);
		gfx_v12_0_set_me_ucode_start_addr(adev);
		gfx_v12_0_set_mec_ucode_start_addr(adev);
	}

	return 0;
}

static int gfx_v12_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");

	return 0;
}

static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(pfp_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(pfp_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
		gfx_v12_0_pfp_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.pfp.pfp_fw_data_obj,
				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
		gfx_v12_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
	/* Wait for the caches to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  ICACHE_PRIMED) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}
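
	/*
	 * The instruction-cache base above is programmed once and shared,
	 * while the RS64 data-cache base is per pipe: each GFX pipe is
	 * selected through GRBM below and given its own DC_BASE0 pointer
	 * (both pipes point at the same PFP data image here).
	 */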
	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			     lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			     upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	gfx_v12_0_set_pfp_ucode_start_addr(adev);

	return 0;
}

static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(me_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(me_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
		gfx_v12_0_me_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.me.me_fw_data_obj,
				      &adev->gfx.me.me_fw_data_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
		gfx_v12_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for the instruction cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  ICACHE_PRIMED) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			     lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			     upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	gfx_v12_0_set_me_ucode_start_addr(adev);

	return 0;
}

static int gfx_v12_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
		return -EINVAL;

	gfx_v12_0_cp_gfx_enable(adev, false);

	r = gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	r = gfx_v12_0_cp_gfx_load_me_microcode_rs64(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v12_0_cp_gfx_start(struct amdgpu_device *adev)
{
	/* init the CP */
	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);

	if (!amdgpu_async_gfx_ring)
		gfx_v12_0_cp_gfx_enable(adev, true);

	return 0;
}

static void gfx_v12_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
}

static void gfx_v12_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;
	u32 i;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);

	/* Init gfx ring 0 for pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
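	/*
	 * CP_RB0_CNTL.RB_BUFSZ encodes the ring size as a power of two:
	 * the ring holds 2^(RB_BUFSZ + 1) dwords, so order_base_2(size / 8)
	 * below yields the field value for a ring_size in bytes (e.g. a
	 * 64KB ring gives RB_BUFSZ = 13, i.e. 16384 dwords).
	 */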
	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);

	gfx_v12_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v12_0_cp_gfx_start(adev);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}

	return 0;
}

static void gfx_v12_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	u32 data;

	data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
			     enable ? 0 : 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);

	adev->gfx.kiq[0].ring.sched.ready = enable;

	udelay(50);
}

static int gfx_v12_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	const __le32 *fw_ucode, *fw_data;
	u32 tmp, fw_ucode_size, fw_data_size;
	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
	u32 *fw_ucode_ptr, *fw_data_ptr;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v12_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(mec_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);

	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(mec_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v12_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev,
				      ALIGN(fw_data_size, 64 * 1024) *
				      adev->gfx.mec.num_pipe_per_mec,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v12_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
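
	/*
	 * The instruction image is shared, but each MEC pipe gets its own
	 * 64KB-aligned copy of the data image inside the single data BO;
	 * the per-pipe CP_MEC_MDBASE registers programmed below point each
	 * pipe at its slice.
	 */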
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		memcpy(fw_data_ptr + i * ALIGN(fw_data_size, 64 * 1024) / 4,
		       fw_data, fw_data_size);
	}

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc24_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO,
			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
					   i * ALIGN(fw_data_size, 64 * 1024)));
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
					   i * ALIGN(fw_data_size, 64 * 1024)));

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
			     lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Trigger an invalidation of the L1 data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	gfx_v12_0_set_mec_ucode_start_addr(adev);

	return 0;
}

static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;
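
	/*
	 * RLC_CP_SCHEDULERS identifies the KIQ in its low byte: queue in
	 * bits [2:0], pipe in [4:3], me in [6:5] as packed below, with
	 * bit 7 then set in a second write as what appears to be the
	 * enable/valid flag.
	 */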
	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}
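
/*
 * Doorbell selectors below are written as (index * 2) << 2: doorbell_index
 * values count 64-bit doorbell slots, so the multiply by two converts them
 * to 32-bit dword units and the shift by two positions the value for the
 * register field, yielding a byte offset.
 */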
static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
{
	/* set graphics engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);

	/* set compute engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.kiq * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.userqueue_end * 2) << 2);
}

static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
				  struct amdgpu_mqd_prop *prop)
{
	struct v12_gfx_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set up mqd control */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = 0;

	/* set up default queue priority level
	 * 0x0 = low priority, 0x1 = high priority
	 */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
	mqd->cp_gfx_hqd_queue_priority = tmp;

	/* set up time quantum */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base. this is similar to CP_RB_BASE */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}

static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v12_gfx_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.gfx_ring[0];

	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else {
		/* restore mqd with the backup copy */
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		*ring->wptr_cpu_addr = 0;
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;

		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v12_0_gfx_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kgq(adev, 0);
	if (r)
		goto done;

	r = gfx_v12_0_cp_gfx_start(adev);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}
done:
	return r;
}

static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
				      struct amdgpu_mqd_prop *prop)
{
	struct v12_compute_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = prop->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
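
	/*
	 * Worked example for the EOP_SIZE encoding below: the register
	 * value is 2^(EOP_SIZE + 1) dwords and GFX12_MEC_HPD_SIZE is 2048
	 * bytes, i.e. 512 dwords, so order_base_2(2048 / 4) - 1 = 8 and
	 * the hardware sees 2^9 = 512 dwords.
	 */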
CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 3055 mqd->cp_hqd_pq_control = tmp; 3056 3057 /* set the wb address whether it's enabled or not */ 3058 wb_gpu_addr = prop->rptr_gpu_addr; 3059 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 3060 mqd->cp_hqd_pq_rptr_report_addr_hi = 3061 upper_32_bits(wb_gpu_addr) & 0xffff; 3062 3063 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3064 wb_gpu_addr = prop->wptr_gpu_addr; 3065 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3066 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3067 3068 tmp = 0; 3069 /* enable the doorbell if requested */ 3070 if (prop->use_doorbell) { 3071 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3072 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3073 DOORBELL_OFFSET, prop->doorbell_index); 3074 3075 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3076 DOORBELL_EN, 1); 3077 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3078 DOORBELL_SOURCE, 0); 3079 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3080 DOORBELL_HIT, 0); 3081 } 3082 3083 mqd->cp_hqd_pq_doorbell_control = tmp; 3084 3085 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3086 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); 3087 3088 /* set the vmid for the queue */ 3089 mqd->cp_hqd_vmid = 0; 3090 3091 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); 3092 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 3093 mqd->cp_hqd_persistent_state = tmp; 3094 3095 /* set MIN_IB_AVAIL_SIZE */ 3096 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); 3097 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 3098 mqd->cp_hqd_ib_control = tmp; 3099 3100 /* set static priority for a compute queue/ring */ 3101 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 3102 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 3103 3104 mqd->cp_hqd_active = prop->hqd_active; 3105 3106 return 0; 3107 } 3108 3109 static int gfx_v12_0_kiq_init_register(struct amdgpu_ring *ring) 3110 { 3111 struct amdgpu_device *adev = ring->adev; 3112 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3113 int j; 3114 3115 /* inactivate the queue */ 3116 if (amdgpu_sriov_vf(adev)) 3117 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 3118 3119 /* disable wptr polling */ 3120 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3121 3122 /* write the EOP addr */ 3123 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 3124 mqd->cp_hqd_eop_base_addr_lo); 3125 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 3126 mqd->cp_hqd_eop_base_addr_hi); 3127 3128 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3129 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 3130 mqd->cp_hqd_eop_control); 3131 3132 /* enable doorbell? 
*/ 3133 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3134 mqd->cp_hqd_pq_doorbell_control); 3135 3136 /* disable the queue if it's active */ 3137 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 3138 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 3139 for (j = 0; j < adev->usec_timeout; j++) { 3140 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 3141 break; 3142 udelay(1); 3143 } 3144 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 3145 mqd->cp_hqd_dequeue_request); 3146 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 3147 mqd->cp_hqd_pq_rptr); 3148 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3149 mqd->cp_hqd_pq_wptr_lo); 3150 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3151 mqd->cp_hqd_pq_wptr_hi); 3152 } 3153 3154 /* set the pointer to the MQD */ 3155 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 3156 mqd->cp_mqd_base_addr_lo); 3157 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 3158 mqd->cp_mqd_base_addr_hi); 3159 3160 /* set MQD vmid to 0 */ 3161 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 3162 mqd->cp_mqd_control); 3163 3164 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3165 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 3166 mqd->cp_hqd_pq_base_lo); 3167 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 3168 mqd->cp_hqd_pq_base_hi); 3169 3170 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3171 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 3172 mqd->cp_hqd_pq_control); 3173 3174 /* set the wb address whether it's enabled or not */ 3175 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 3176 mqd->cp_hqd_pq_rptr_report_addr_lo); 3177 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 3178 mqd->cp_hqd_pq_rptr_report_addr_hi); 3179 3180 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3181 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 3182 mqd->cp_hqd_pq_wptr_poll_addr_lo); 3183 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 3184 mqd->cp_hqd_pq_wptr_poll_addr_hi); 3185 3186 /* enable the doorbell if requested */ 3187 if (ring->use_doorbell) { 3188 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3189 (adev->doorbell_index.kiq * 2) << 2); 3190 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3191 (adev->doorbell_index.userqueue_end * 2) << 2); 3192 } 3193 3194 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3195 mqd->cp_hqd_pq_doorbell_control); 3196 3197 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3198 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3199 mqd->cp_hqd_pq_wptr_lo); 3200 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3201 mqd->cp_hqd_pq_wptr_hi); 3202 3203 /* set the vmid for the queue */ 3204 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 3205 3206 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 3207 mqd->cp_hqd_persistent_state); 3208 3209 /* activate the queue */ 3210 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 3211 mqd->cp_hqd_active); 3212 3213 if (ring->use_doorbell) 3214 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 3215 3216 return 0; 3217 } 3218 3219 static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring) 3220 { 3221 struct amdgpu_device *adev = ring->adev; 3222 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3223 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 3224 3225 gfx_v12_0_kiq_setting(ring); 3226 3227 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 3228 /* reset MQD to a clean status */ 3229 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3230 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3231 3232 /* reset ring buffer */ 3233 ring->wptr = 0; 3234 amdgpu_ring_clear_ring(ring); 3235 3236 
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v12_0_kiq_init_register(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		gfx_v12_0_kiq_init_register(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	}

	return 0;
}

static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v12_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else {
		/* restore MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq[0].ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v12_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	if (!amdgpu_async_gfx_ring)
		gfx_v12_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v12_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev, 0);
done:
	return r;
}

static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v12_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v12_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v12_0_cp_compute_load_microcode_rs64(adev);
		if (r)
			return r;
	}

	gfx_v12_0_cp_set_doorbell_range(adev);

	if (amdgpu_async_gfx_ring) {
		gfx_v12_0_cp_compute_enable(adev, true);
		gfx_v12_0_cp_gfx_enable(adev, true);
	}

	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
		r = amdgpu_mes_kiq_hw_init(adev);
	else
		r = gfx_v12_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v12_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v12_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v12_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v12_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v12_0_cp_gfx_enable(adev, enable);
	gfx_v12_0_cp_compute_enable(adev, enable);
}
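
/*
 * Bring up the GFX hub: enable GART, flush HDP so pending writes are
 * visible, program the default VM fault behaviour, then flush the
 * GFXHUB TLB before the CP is started.
 */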
static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	/* TODO investigate why this and the hdp flush above are needed,
	 * are we missing a flush somewhere else?
	 */
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	return 0;
}

static int get_gb_addr_config(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	if (gb_addr_config == 0)
		return -EINVAL;

	adev->gfx.config.gb_addr_config_fields.num_pkrs =
		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));

	return 0;
}

static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);

	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}

static int gfx_v12_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
			/* RLC autoload sequence 1: Program rlc ram */
			if (adev->gfx.imu.funcs->program_rlc_ram)
				adev->gfx.imu.funcs->program_rlc_ram(adev);
		}
		/* rlc autoload firmware */
		r = gfx_v12_0_rlc_backdoor_autoload_enable(adev);
		if (r)
			return r;
	} else {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
				if (adev->gfx.imu.funcs->load_microcode)
					adev->gfx.imu.funcs->load_microcode(adev);
				if (adev->gfx.imu.funcs->setup_imu)
					adev->gfx.imu.funcs->setup_imu(adev);
				if (adev->gfx.imu.funcs->start_imu)
					adev->gfx.imu.funcs->start_imu(adev);
			}

			/* disable gpa mode in backdoor loading */
			gfx_v12_0_disable_gpa_mode(adev);
		}
	}

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = gfx_v12_0_wait_for_rlc_autoload_complete(adev);
		if (r) {
			dev_err(adev->dev, "(%d) failed to wait for rlc autoload complete\n", r);
			return r;
		}
	}

	adev->gfx.is_poweron = true;

	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		gfx_v12_0_config_gfx_rs64(adev);

	r = gfx_v12_0_gfxhub_enable(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT ||
	     adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) &&
	    (amdgpu_dpm == 1)) {
		/*
		 * For gfx 12, RLC firmware loading relies on the SMU firmware
		 * being loaded first, so for the direct and backdoor-autoload
		 * paths the SMC ucode has to be loaded here before the RLC.
		 */
		r = amdgpu_pm_load_smu_firmware(adev, NULL);
		if (r)
			return r;
	}

	gfx_v12_0_constants_init(adev);

	if (adev->nbio.funcs->gc_doorbell_init)
		adev->nbio.funcs->gc_doorbell_init(adev);

	r = gfx_v12_0_rlc_resume(adev);
	if (r)
		return r;

	/*
	 * golden register init and rlc resume may override some registers,
	 * so reconfigure them here
	 */
	gfx_v12_0_tcp_harvest(adev);

	r = gfx_v12_0_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v12_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_gfx_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
					   PREEMPT_QUEUES, 0, 0);

	if (adev->gfx.kiq[0].ring.sched.ready)
		r = amdgpu_ring_test_helper(kiq_ring);

	return r;
}

static int gfx_v12_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;
	uint32_t tmp;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	if (!adev->no_hw_access) {
		if (amdgpu_async_gfx_ring) {
			r = gfx_v12_0_kiq_disable_kgq(adev);
			if (r)
				DRM_ERROR("KGQ disable failed\n");
		}

		if (amdgpu_gfx_disable_kcq(adev, 0))
			DRM_ERROR("KCQ disable failed\n");

		amdgpu_mes_kiq_hw_fini(adev);
	}

	if (amdgpu_sriov_vf(adev)) {
		gfx_v12_0_cp_gfx_enable(adev, false);
		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
		tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
		tmp &= 0xffffff00;
		WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);

		return 0;
	}
	gfx_v12_0_cp_enable(adev, false);
	gfx_v12_0_enable_gui_idle_interrupt(adev, false);

	adev->gfxhub.funcs->gart_disable(adev);

	adev->gfx.is_poweron = false;

	return 0;
}

static int gfx_v12_0_suspend(void *handle)
{
	return gfx_v12_0_hw_fini(handle);
}

static int gfx_v12_0_resume(void *handle)
{
	return gfx_v12_0_hw_init(handle);
}
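
/*
 * Idle detection: the GRBM_STATUS GUI_ACTIVE bit is set while any GFX
 * work is outstanding, so both helpers below check that single bit.
 */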
static bool gfx_v12_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
			  GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v12_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock = 0;

	if (adev->smuio.funcs &&
	    adev->smuio.funcs->get_gpu_clock_counter)
		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
	else
		dev_warn(adev->dev, "query gpu clock counter is not supported\n");

	return clock;
}

static int gfx_v12_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.funcs = &gfx_v12_0_gfx_funcs;

	adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);

	gfx_v12_0_set_kiq_pm4_funcs(adev);
	gfx_v12_0_set_ring_funcs(adev);
	gfx_v12_0_set_irq_funcs(adev);
	gfx_v12_0_set_rlc_funcs(adev);
	gfx_v12_0_set_mqd_funcs(adev);
	gfx_v12_0_set_imu_funcs(adev);

	gfx_v12_0_init_rlcg_reg_access_ctrl(adev);

	return gfx_v12_0_init_microcode(adev);
}

static int gfx_v12_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
	if (r)
		return r;

	return 0;
}

static bool gfx_v12_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_cntl;

	/* if RLC is not enabled, do nothing */
	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ?
		true : false;
}

static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev,
				    int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);

	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
				   RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev,
				      int xcc_id)
{
	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
}

static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v12_0_update_spm_vmid(struct amdgpu_device *adev,
				      struct amdgpu_ring *ring,
				      unsigned vmid)
{
	u32 reg, data;

	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(reg);
	else
		data = RREG32(reg);

	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
	else
		WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);

	if (ring
	    && amdgpu_sriov_is_pp_one_vf(adev)
	    && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
		|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
		uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);

		amdgpu_ring_emit_wreg(ring, reg, data);
	}
}

static const struct amdgpu_rlc_funcs gfx_v12_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v12_0_is_rlc_enabled,
	.set_safe_mode = gfx_v12_0_set_safe_mode,
	.unset_safe_mode = gfx_v12_0_unset_safe_mode,
	.init = gfx_v12_0_rlc_init,
	.get_csb_size = gfx_v12_0_get_csb_size,
	.get_csb_buffer = gfx_v12_0_get_csb_buffer,
	.resume = gfx_v12_0_rlc_resume,
	.stop = gfx_v12_0_rlc_stop,
	.reset = gfx_v12_0_rlc_reset,
	.start = gfx_v12_0_rlc_start,
	.update_spm_vmid = gfx_v12_0_update_spm_vmid,
};

#if 0
static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
{
	/* TODO */
}

static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
{
	/* TODO */
}
#endif

static int gfx_v12_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	default:
		break;
	}

	return 0;
}
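
/*
 * Coarse-grain clock gating: on enable, clear the CGCG/CGLS/3D override
 * bits, program the CGCG/CGLS FSM thresholds and the RB wptr poll
 * interval, and enable the CP busy/idle interrupts that feed the FSM;
 * on disable, just turn the CGCG/CGLS enables back off.
 */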
static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags &
	      (AMD_CG_SUPPORT_GFX_CGCG |
	       AMD_CG_SUPPORT_GFX_CGLS |
	       AMD_CG_SUPPORT_GFX_3D_CGCG |
	       AMD_CG_SUPPORT_GFX_3D_CGLS)))
		return;

	if (enable) {
		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

		/* unset CGCG override */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;

		/* update CGCG override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0000363F) */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);

		/* Program RLC_CGCG_CGLS_CTRL_3D */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		}

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);

		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);

		if (def != data)
			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);

		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	} else {
		/* Program RLC_CGCG_CGLS_CTRL */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);

		/* Program RLC_CGCG_CGLS_CTRL_3D */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	}
}

static void gfx_v12_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
		return;

	/* It is disabled by HW by default */
	if (enable) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);

			if (def != data)
				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
		}
	} else {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);

			if (def != data)
				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
		}
	}
}

static void gfx_v12_0_update_repeater_fgcg(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
			RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}
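
/*
 * Fine-grain SRAM clock gating is likewise controlled through a single
 * override bit in RLC_CGTT_MGCG_OVERRIDE.
 */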
static void gfx_v12_0_update_sram_fgcg(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}

static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v12_0_update_coarse_grain_clock_gating(adev, enable);

	gfx_v12_0_update_medium_grain_clock_gating(adev, enable);

	gfx_v12_0_update_repeater_fgcg(adev, enable);

	gfx_v12_0_update_sram_fgcg(adev, enable);

	gfx_v12_0_update_perf_clk(adev, enable);

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_CGLS |
	     AMD_CG_SUPPORT_GFX_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGLS))
		gfx_v12_0_enable_gui_idle_interrupt(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return 0;
}

static int gfx_v12_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		gfx_v12_0_update_gfx_clock_gating(adev,
						  state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v12_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_REPEATER_FGCG */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;

	/* AMD_CG_SUPPORT_GFX_FGCG */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_FGCG;

	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}

static u64 gfx_v12_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	/* gfx12 is 32bit rptr */
	return *(uint32_t *)ring->rptr_cpu_addr;
}

static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	} else {
		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
	uint64_t wptr_tmp;

	if (ring->is_mes_queue) {
		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
		aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);

		wptr_tmp = ring->wptr & ring->buf_mask;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
		*wptr_saved = wptr_tmp;
		/* assume doorbell always being used by mes mapped queue */
		if (*is_queue_unmap) {
			WDOORBELL64(aggregated_db_index, wptr_tmp);
			WDOORBELL64(ring->doorbell_index, wptr_tmp);
		} else {
			WDOORBELL64(ring->doorbell_index, wptr_tmp);

			if (*is_queue_unmap)
				WDOORBELL64(aggregated_db_index, wptr_tmp);
		}
	} else {
		if (ring->use_doorbell) {
			/* XXX check if swapping is necessary on BE */
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr);
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		} else {
			WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
				     lower_32_bits(ring->wptr));
			WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
				     upper_32_bits(ring->wptr));
		}
	}
}

static u64 gfx_v12_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	/* gfx12 hardware is 32bit rptr */
	return *(uint32_t *)ring->rptr_cpu_addr;
}

static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}

static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
	uint64_t wptr_tmp;

	if (ring->is_mes_queue) {
		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
		aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);

		wptr_tmp = ring->wptr & ring->buf_mask;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
		*wptr_saved = wptr_tmp;
		/* assume doorbell always used by mes mapped queue */
		if (*is_queue_unmap) {
			WDOORBELL64(aggregated_db_index, wptr_tmp);
			WDOORBELL64(ring->doorbell_index, wptr_tmp);
		} else {
			WDOORBELL64(ring->doorbell_index, wptr_tmp);

			if (*is_queue_unmap)
				WDOORBELL64(aggregated_db_index, wptr_tmp);
		}
	} else {
		/* XXX check if swapping is necessary on BE */
		if (ring->use_doorbell) {
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr);
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		} else {
			BUG(); /* only DOORBELL method supported on gfx12 now */
		}
	}
}
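
/*
 * HDP flush: select the NBIO ref/mask bits for the requesting CP engine
 * (CP0 for gfx, CP2/CP6 shifted by pipe for the MECs) and wait on the
 * flush-done handshake with a WAIT_REG_MEM.
 */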
static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				       struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);

	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (ring->is_mes_queue)
		/* inherit vmid from mqd */
		control |= 0x400000;

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib,
					   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	if (ring->is_mes_queue)
		/* inherit vmid from mqd */
		control |= 0x40000000;

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, ring->is_mes_queue ?
			 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
}

static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v12_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}

static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	if (ring->is_mes_queue)
		gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
	else
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static void gfx_v12_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v12_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
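
/*
 * COND_EXEC emission: write the packet with a zero dword count and
 * return the ring offset of that count so the caller can patch in the
 * real number of dwords to conditionally skip later.
 */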
static unsigned gfx_v12_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
						   uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	/* discard following DWs if *cond_exec_gpu_addr==0 */
	amdgpu_ring_write(ring, 0);
	ret = ring->wptr & ring->buf_mask;
	/* patch dummy value later */
	amdgpu_ring_write(ring, 0);

	return ret;
}

static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* assert IB preemption, emit the trailing fence */
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
				   ring->trail_fence_gpu_addr,
				   ++ring->trail_seq);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
	}

	/* deassert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring,
					   bool start,
					   bool secure)
{
	uint32_t v = secure ? FRAME_TMZ : 0;

	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}

static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v12_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v12_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	gfx_v12_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
			       ref, mask, 0x20);
}

static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
					 unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, regSQ_CMD, value);
}

static void
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	if (!me) {
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}
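
/*
 * The CP_ME1_PIPEn_INT_CNTL registers share one field layout, so the
 * PIPE0 field masks are reused for every pipe below.
 */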
static void
gfx_v12_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
					  int me, int pipe,
					  enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v12_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/*
				 * Per-queue interrupt is supported for MEC starting
				 * from VI. The interrupt can only be enabled/disabled
				 * per pipe instead of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}

static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    PRIV_REG_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				/* MECs start at 1 */
				cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
								    PRIV_REG_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
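
/*
 * Bad-opcode faults reuse the same per-pipe CPG/CPC interrupt control
 * registers as privileged register faults; only the enable bit differs.
 */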
static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    OPCODE_ERROR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				/* MECs start at 1 */
				cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
								    OPCODE_ERROR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    PRIV_INSTR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v12_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal opcode in command stream\n");
	gfx_v12_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v12_0_handle_priv_fault(adev, entry);
	return 0;
}

static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}

static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);

	if (!adev->gfx.ip_dump_core)
		return;

	for (i = 0; i < reg_count; i++)
		drm_printf(p, "%-50s \t 0x%08x\n",
			   gc_reg_list_12_0[i].reg_name,
			   adev->gfx.ip_dump_core[i]);

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_cp_reg_list_12[reg].reg_name,
						   adev->gfx.ip_dump_compute_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}

	/* print gfx queue registers for all instances */
	if (!adev->gfx.ip_dump_gfx_queues)
		return;

	index = 0;
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.me.num_me,
		   adev->gfx.me.num_pipe_per_me,
		   adev->gfx.me.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_gfx_queue_reg_list_12[reg].reg_name,
						   adev->gfx.ip_dump_gfx_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}
}
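
/*
 * Register dump for devcoredump: GFXOFF must be held off while reading,
 * and the per-queue registers are read under srbm_mutex with the
 * matching GRBM me/pipe/queue selection.
 */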
gc_cp_reg_list_12[reg])); 5102 } 5103 index += reg_count; 5104 } 5105 } 5106 } 5107 soc24_grbm_select(adev, 0, 0, 0, 0); 5108 mutex_unlock(&adev->srbm_mutex); 5109 amdgpu_gfx_off_ctrl(adev, true); 5110 5111 /* dump gfx queue registers for all instances */ 5112 if (!adev->gfx.ip_dump_gfx_queues) 5113 return; 5114 5115 index = 0; 5116 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 5117 amdgpu_gfx_off_ctrl(adev, false); 5118 mutex_lock(&adev->srbm_mutex); 5119 for (i = 0; i < adev->gfx.me.num_me; i++) { 5120 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 5121 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 5122 soc24_grbm_select(adev, i, j, k, 0); 5123 5124 for (reg = 0; reg < reg_count; reg++) { 5125 adev->gfx.ip_dump_gfx_queues[index + reg] = 5126 RREG32(SOC15_REG_ENTRY_OFFSET( 5127 gc_gfx_queue_reg_list_12[reg])); 5128 } 5129 index += reg_count; 5130 } 5131 } 5132 } 5133 soc24_grbm_select(adev, 0, 0, 0, 0); 5134 mutex_unlock(&adev->srbm_mutex); 5135 amdgpu_gfx_off_ctrl(adev, true); 5136 } 5137 5138 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = { 5139 .name = "gfx_v12_0", 5140 .early_init = gfx_v12_0_early_init, 5141 .late_init = gfx_v12_0_late_init, 5142 .sw_init = gfx_v12_0_sw_init, 5143 .sw_fini = gfx_v12_0_sw_fini, 5144 .hw_init = gfx_v12_0_hw_init, 5145 .hw_fini = gfx_v12_0_hw_fini, 5146 .suspend = gfx_v12_0_suspend, 5147 .resume = gfx_v12_0_resume, 5148 .is_idle = gfx_v12_0_is_idle, 5149 .wait_for_idle = gfx_v12_0_wait_for_idle, 5150 .set_clockgating_state = gfx_v12_0_set_clockgating_state, 5151 .set_powergating_state = gfx_v12_0_set_powergating_state, 5152 .get_clockgating_state = gfx_v12_0_get_clockgating_state, 5153 .dump_ip_state = gfx_v12_ip_dump, 5154 .print_ip_state = gfx_v12_ip_print, 5155 }; 5156 5157 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 5158 .type = AMDGPU_RING_TYPE_GFX, 5159 .align_mask = 0xff, 5160 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5161 .support_64bit_ptrs = true, 5162 .secure_submission_supported = true, 5163 .get_rptr = gfx_v12_0_ring_get_rptr_gfx, 5164 .get_wptr = gfx_v12_0_ring_get_wptr_gfx, 5165 .set_wptr = gfx_v12_0_ring_set_wptr_gfx, 5166 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 5167 5 + /* COND_EXEC */ 5168 7 + /* PIPELINE_SYNC */ 5169 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5170 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5171 2 + /* VM_FLUSH */ 5172 8 + /* FENCE for VM_FLUSH */ 5173 5 + /* COND_EXEC */ 5174 7 + /* HDP_flush */ 5175 4 + /* VGT_flush */ 5176 31 + /* DE_META */ 5177 3 + /* CNTX_CTRL */ 5178 5 + /* HDP_INVL */ 5179 8 + 8 + /* FENCE x2 */ 5180 8, /* gfx_v12_0_emit_mem_sync */ 5181 .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ 5182 .emit_ib = gfx_v12_0_ring_emit_ib_gfx, 5183 .emit_fence = gfx_v12_0_ring_emit_fence, 5184 .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 5185 .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 5186 .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 5187 .test_ring = gfx_v12_0_ring_test_ring, 5188 .test_ib = gfx_v12_0_ring_test_ib, 5189 .insert_nop = amdgpu_ring_insert_nop, 5190 .pad_ib = amdgpu_ring_generic_pad_ib, 5191 .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, 5192 .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, 5193 .preempt_ib = gfx_v12_0_ring_preempt_ib, 5194 .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl, 5195 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5196 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5197 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5198 .soft_recovery = gfx_v12_0_ring_soft_recovery, 5199 

static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v12_0_ring_get_rptr_compute,
	.get_wptr = gfx_v12_0_ring_get_wptr_compute,
	.set_wptr = gfx_v12_0_ring_set_wptr_compute,
	.emit_frame_size =
		7 + /* gfx_v12_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v12_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v12_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v12_0_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */
	.emit_ib = gfx_v12_0_ring_emit_ib_compute,
	.emit_fence = gfx_v12_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v12_0_ring_emit_vm_flush,
	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
	.test_ring = gfx_v12_0_ring_test_ring,
	.test_ib = gfx_v12_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v12_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v12_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v12_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v12_0_ring_get_rptr_compute,
	.get_wptr = gfx_v12_0_ring_get_wptr_compute,
	.set_wptr = gfx_v12_0_ring_set_wptr_compute,
	.emit_frame_size =
		7 + /* gfx_v12_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v12_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v12_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v12_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */
	.emit_ib = gfx_v12_0_ring_emit_ib_compute,
	.emit_fence = gfx_v12_0_ring_emit_fence_kiq,
	.test_ring = gfx_v12_0_ring_test_ring,
	.test_ib = gfx_v12_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v12_0_ring_emit_rreg,
	.emit_wreg = gfx_v12_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v12_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v12_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v12_0_ring_funcs_compute;
}
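/*
 * These per-type tables are consumed through the dispatch wrappers in
 * amdgpu_ring.h; e.g. amdgpu_ring_emit_fence(ring, ...) expands to
 * ring->funcs->emit_fence(ring, ...), so assigning the funcs pointer per
 * ring type above is all the wiring the core needs.
 */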

static const struct amdgpu_irq_src_funcs gfx_v12_0_eop_irq_funcs = {
	.set = gfx_v12_0_set_eop_interrupt_state,
	.process = gfx_v12_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = {
	.set = gfx_v12_0_set_priv_reg_fault_state,
	.process = gfx_v12_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = {
	.set = gfx_v12_0_set_bad_op_fault_state,
	.process = gfx_v12_0_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = {
	.set = gfx_v12_0_set_priv_inst_fault_state,
	.process = gfx_v12_0_priv_inst_irq,
};

static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v12_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs;
}

static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs;
}

static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v12_0_rlc_funcs;
}

static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v12_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v12_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v12_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v12_0_compute_mqd_init;
}

static void gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v12_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v12_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}
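/*
 * Worked example for the WGP -> CU expansion above: a per-SH WGP active
 * bitmap of 0b0101 (WGP0 and WGP2 enabled) yields a CU active bitmap of
 * 0b00110011, since each enabled WGP contributes the two CU bits at
 * positions 2 * wgp_idx and 2 * wgp_idx + 1.
 */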

static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			bitmap = i * adev->gfx.config.max_sh_per_se + j;
			if (!((gfx_v12_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
				continue;
			mask = 1;
			counter = 0;
			gfx_v12_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v12_0_get_cu_active_bitmap_per_sh(adev);

			/*
			 * GFX12 can support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the cu mask for SEs 4 to 7; the layout of the bitmap
			 * is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v12_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v12_0_ip_funcs,
};
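/*
 * Usage sketch (simplified, based on the common amdgpu discovery flow): the
 * IP discovery code registers this block version while enumerating GC 12.0.x,
 * roughly
 *
 *	amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
 *
 * after which the amd_ip_funcs above drive init/fini, idle handling and
 * gating state for the GFX block.
 */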