1 /* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/delay.h> 24 #include <linux/kernel.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include "amdgpu.h" 29 #include "amdgpu_gfx.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_smu.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "imu_v12_0.h" 34 #include "soc24.h" 35 #include "nvd.h" 36 37 #include "gc/gc_12_0_0_offset.h" 38 #include "gc/gc_12_0_0_sh_mask.h" 39 #include "soc24_enum.h" 40 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 41 42 #include "soc15.h" 43 #include "soc15d.h" 44 #include "clearstate_gfx12.h" 45 #include "v12_structs.h" 46 #include "gfx_v12_0.h" 47 #include "nbif_v6_3_1.h" 48 #include "mes_v12_0.h" 49 50 #define GFX12_NUM_GFX_RINGS 1 51 #define GFX12_MEC_HPD_SIZE 2048 52 53 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 54 55 MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin"); 56 MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin"); 57 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin"); 58 MODULE_FIRMWARE("amdgpu/gc_12_0_0_rlc.bin"); 59 MODULE_FIRMWARE("amdgpu/gc_12_0_0_toc.bin"); 60 MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin"); 61 MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin"); 62 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin"); 63 MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin"); 64 MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin"); 65 66 static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { 67 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), 68 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), 69 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3), 70 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), 71 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), 72 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3), 73 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), 74 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), 75 SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), 76 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), 77 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), 78 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2), 79 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2), 80 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), 81 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), 82 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0), 83 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE), 84 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 85 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR), 86 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE), 87 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR), 88 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR), 89 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 90 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ), 91 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 92 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 93 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 94 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO), 95 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI), 96 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ), 97 SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), 98 SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), 99 SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), 100 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS), 101 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2), 102 SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS), 103 SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), 104 SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES), 105 SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS), 106 SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), 107 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL), 108 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_LO32), 109 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_HI32), 110 SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), 111 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), 112 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL), 113 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR), 114 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR), 115 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR), 116 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), 117 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0), 118 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1), 119 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR), 120 121 /* cp header registers */ 122 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 123 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 124 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 125 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 126 /* SE status registers */ 127 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), 128 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), 129 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2), 130 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3) 131 }; 132 133 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = { 134 /* compute registers */ 135 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID), 136 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE), 137 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY), 138 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY), 139 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM), 140 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE), 141 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI), 142 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR), 143 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), 144 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), 145 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), 146 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL), 147 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR), 148 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI), 149 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR), 150 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL), 151 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST), 152 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR), 153 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI), 154 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL), 155 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR), 156 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR), 157 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS), 158 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO), 159 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI), 160 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL), 161 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET), 162 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE), 163 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET), 164 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE), 165 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE), 166 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR), 167 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM), 168 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), 169 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), 170 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET), 171 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT), 172 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET), 173 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS) 174 }; 175 176 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { 177 /* gfx queue registers */ 178 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE), 179 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID), 180 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY), 181 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM), 182 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE), 183 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI), 184 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET), 185 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL), 186 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR), 187 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR), 188 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI), 189 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST), 190 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED), 191 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL), 192 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0), 193 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0), 194 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR), 195 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI), 196 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO), 197 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI), 198 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 199 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 200 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 201 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 202 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) 203 }; 204 205 static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = { 206 SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), 207 SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), 208 SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) 209 }; 210 211 static const struct soc15_reg_golden golden_settings_gc_12_0[] = { 212 SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000), 213 }; 214 215 #define DEFAULT_SH_MEM_CONFIG \ 216 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 217 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ 218 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) 219 220 static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev); 221 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev); 222 static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev); 223 static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev); 224 static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev); 225 static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev); 226 static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev, 227 struct amdgpu_cu_info *cu_info); 228 static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev); 229 static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 230 u32 sh_num, u32 instance, int xcc_id); 231 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 232 233 static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 234 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 235 uint32_t val); 236 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); 237 static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 238 uint16_t pasid, uint32_t flush_type, 239 bool all_hub, uint8_t dst_sel); 240 static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id); 241 static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); 242 static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev, 243 bool enable); 244 245 static void gfx_v12_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, 246 uint64_t queue_mask) 247 { 248 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 249 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 250 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 251 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 252 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 253 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 254 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 255 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 256 amdgpu_ring_write(kiq_ring, 0); 257 } 258 259 static void gfx_v12_0_kiq_map_queues(struct amdgpu_ring *kiq_ring, 260 struct amdgpu_ring *ring) 261 { 262 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 263 uint64_t wptr_addr = ring->wptr_gpu_addr; 264 uint32_t me = 0, eng_sel = 0; 265 266 switch (ring->funcs->type) { 267 case AMDGPU_RING_TYPE_COMPUTE: 268 me = 1; 269 eng_sel = 0; 270 break; 271 case AMDGPU_RING_TYPE_GFX: 272 me = 0; 273 eng_sel = 4; 274 break; 275 case AMDGPU_RING_TYPE_MES: 276 me = 2; 277 eng_sel = 5; 278 break; 279 default: 280 WARN_ON(1); 281 } 282 283 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 284 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 285 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 286 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 287 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 288 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 289 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 290 PACKET3_MAP_QUEUES_ME((me)) | 291 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 292 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 293 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 294 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 295 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 296 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 297 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 298 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 299 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 300 } 301 302 static void gfx_v12_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 303 struct amdgpu_ring *ring, 304 enum amdgpu_unmap_queues_action action, 305 u64 gpu_addr, u64 seq) 306 { 307 struct amdgpu_device *adev = kiq_ring->adev; 308 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 309 310 if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { 311 amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); 312 return; 313 } 314 315 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 316 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 317 PACKET3_UNMAP_QUEUES_ACTION(action) | 318 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 319 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | 320 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 321 amdgpu_ring_write(kiq_ring, 322 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 323 324 if (action == PREEMPT_QUEUES_NO_UNMAP) { 325 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); 326 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); 327 amdgpu_ring_write(kiq_ring, seq); 328 } else { 329 amdgpu_ring_write(kiq_ring, 0); 330 amdgpu_ring_write(kiq_ring, 0); 331 amdgpu_ring_write(kiq_ring, 0); 332 } 333 } 334 335 static void gfx_v12_0_kiq_query_status(struct amdgpu_ring *kiq_ring, 336 struct amdgpu_ring *ring, 337 u64 addr, u64 seq) 338 { 339 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 340 341 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); 342 amdgpu_ring_write(kiq_ring, 343 PACKET3_QUERY_STATUS_CONTEXT_ID(0) | 344 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | 345 PACKET3_QUERY_STATUS_COMMAND(2)); 346 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 347 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | 348 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); 349 amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); 350 amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); 351 amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); 352 amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); 353 } 354 355 static void gfx_v12_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, 356 uint16_t pasid, 357 uint32_t flush_type, 358 bool all_hub) 359 { 360 gfx_v12_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1); 361 } 362 363 static const struct kiq_pm4_funcs gfx_v12_0_kiq_pm4_funcs = { 364 .kiq_set_resources = gfx_v12_0_kiq_set_resources, 365 .kiq_map_queues = gfx_v12_0_kiq_map_queues, 366 .kiq_unmap_queues = gfx_v12_0_kiq_unmap_queues, 367 .kiq_query_status = gfx_v12_0_kiq_query_status, 368 .kiq_invalidate_tlbs = gfx_v12_0_kiq_invalidate_tlbs, 369 .set_resources_size = 8, 370 .map_queues_size = 7, 371 .unmap_queues_size = 6, 372 .query_status_size = 7, 373 .invalidate_tlbs_size = 2, 374 }; 375 376 static void gfx_v12_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) 377 { 378 adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs; 379 } 380 381 static void gfx_v12_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 382 int mem_space, int opt, uint32_t addr0, 383 uint32_t addr1, uint32_t ref, 384 uint32_t mask, uint32_t inv) 385 { 386 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 387 amdgpu_ring_write(ring, 388 /* memory (1) or register (0) */ 389 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 390 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 391 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 392 WAIT_REG_MEM_ENGINE(eng_sel))); 393 394 if (mem_space) 395 BUG_ON(addr0 & 0x3); /* Dword align */ 396 amdgpu_ring_write(ring, addr0); 397 amdgpu_ring_write(ring, addr1); 398 amdgpu_ring_write(ring, ref); 399 amdgpu_ring_write(ring, mask); 400 amdgpu_ring_write(ring, inv); /* poll interval */ 401 } 402 403 static int gfx_v12_0_ring_test_ring(struct amdgpu_ring *ring) 404 { 405 struct amdgpu_device *adev = ring->adev; 406 uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 407 uint32_t tmp = 0; 408 unsigned i; 409 int r; 410 411 WREG32(scratch, 0xCAFEDEAD); 412 r = amdgpu_ring_alloc(ring, 5); 413 if (r) { 414 dev_err(adev->dev, 415 "amdgpu: cp failed to lock ring %d (%d).\n", 416 ring->idx, r); 417 return r; 418 } 419 420 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 421 gfx_v12_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); 422 } else { 423 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 424 amdgpu_ring_write(ring, scratch - 425 PACKET3_SET_UCONFIG_REG_START); 426 amdgpu_ring_write(ring, 0xDEADBEEF); 427 } 428 amdgpu_ring_commit(ring); 429 430 for (i = 0; i < adev->usec_timeout; i++) { 431 tmp = RREG32(scratch); 432 if (tmp == 0xDEADBEEF) 433 break; 434 if (amdgpu_emu_mode == 1) 435 msleep(1); 436 else 437 udelay(1); 438 } 439 440 if (i >= adev->usec_timeout) 441 r = -ETIMEDOUT; 442 return r; 443 } 444 445 static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 446 { 447 struct amdgpu_device *adev = ring->adev; 448 struct amdgpu_ib ib; 449 struct dma_fence *f = NULL; 450 unsigned index; 451 uint64_t gpu_addr; 452 volatile uint32_t *cpu_ptr; 453 long r; 454 455 /* MES KIQ fw hasn't indirect buffer support for now */ 456 if (adev->enable_mes_kiq && 457 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 458 return 0; 459 460 memset(&ib, 0, sizeof(ib)); 461 462 if (ring->is_mes_queue) { 463 uint32_t padding, offset; 464 465 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); 466 padding = amdgpu_mes_ctx_get_offs(ring, 467 AMDGPU_MES_CTX_PADDING_OFFS); 468 469 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 470 ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 471 472 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding); 473 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding); 474 *cpu_ptr = cpu_to_le32(0xCAFEDEAD); 475 } else { 476 r = amdgpu_device_wb_get(adev, &index); 477 if (r) 478 return r; 479 480 gpu_addr = adev->wb.gpu_addr + (index * 4); 481 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 482 cpu_ptr = &adev->wb.wb[index]; 483 484 r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); 485 if (r) { 486 dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r); 487 goto err1; 488 } 489 } 490 491 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 492 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 493 ib.ptr[2] = lower_32_bits(gpu_addr); 494 ib.ptr[3] = upper_32_bits(gpu_addr); 495 ib.ptr[4] = 0xDEADBEEF; 496 ib.length_dw = 5; 497 498 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 499 if (r) 500 goto err2; 501 502 r = dma_fence_wait_timeout(f, false, timeout); 503 if (r == 0) { 504 r = -ETIMEDOUT; 505 goto err2; 506 } else if (r < 0) { 507 goto err2; 508 } 509 510 if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) 511 r = 0; 512 else 513 r = -EINVAL; 514 err2: 515 if (!ring->is_mes_queue) 516 amdgpu_ib_free(adev, &ib, NULL); 517 dma_fence_put(f); 518 err1: 519 if (!ring->is_mes_queue) 520 amdgpu_device_wb_free(adev, index); 521 return r; 522 } 523 524 static void gfx_v12_0_free_microcode(struct amdgpu_device *adev) 525 { 526 amdgpu_ucode_release(&adev->gfx.pfp_fw); 527 amdgpu_ucode_release(&adev->gfx.me_fw); 528 amdgpu_ucode_release(&adev->gfx.rlc_fw); 529 amdgpu_ucode_release(&adev->gfx.mec_fw); 530 531 kfree(adev->gfx.rlc.register_list_format); 532 } 533 534 static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix) 535 { 536 const struct psp_firmware_header_v1_0 *toc_hdr; 537 int err = 0; 538 539 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, 540 "amdgpu/%s_toc.bin", ucode_prefix); 541 if (err) 542 goto out; 543 544 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 545 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 546 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 547 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 548 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 549 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 550 return 0; 551 out: 552 amdgpu_ucode_release(&adev->psp.toc_fw); 553 return err; 554 } 555 556 static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) 557 { 558 char ucode_prefix[15]; 559 int err; 560 const struct rlc_firmware_header_v2_0 *rlc_hdr; 561 uint16_t version_major; 562 uint16_t version_minor; 563 564 DRM_DEBUG("\n"); 565 566 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 567 568 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, 569 "amdgpu/%s_pfp.bin", ucode_prefix); 570 if (err) 571 goto out; 572 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP); 573 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK); 574 575 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, 576 "amdgpu/%s_me.bin", ucode_prefix); 577 if (err) 578 goto out; 579 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME); 580 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); 581 582 if (!amdgpu_sriov_vf(adev)) { 583 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 584 "amdgpu/%s_rlc.bin", ucode_prefix); 585 if (err) 586 goto out; 587 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 588 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 589 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 590 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); 591 if (err) 592 goto out; 593 } 594 595 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 596 "amdgpu/%s_mec.bin", ucode_prefix); 597 if (err) 598 goto out; 599 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC); 600 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK); 601 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK); 602 603 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 604 err = gfx_v12_0_init_toc_microcode(adev, ucode_prefix); 605 606 /* only one MEC for gfx 12 */ 607 adev->gfx.mec2_fw = NULL; 608 609 if (adev->gfx.imu.funcs) { 610 if (adev->gfx.imu.funcs->init_microcode) { 611 err = adev->gfx.imu.funcs->init_microcode(adev); 612 if (err) 613 dev_err(adev->dev, "Failed to load imu firmware!\n"); 614 } 615 } 616 617 out: 618 if (err) { 619 amdgpu_ucode_release(&adev->gfx.pfp_fw); 620 amdgpu_ucode_release(&adev->gfx.me_fw); 621 amdgpu_ucode_release(&adev->gfx.rlc_fw); 622 amdgpu_ucode_release(&adev->gfx.mec_fw); 623 } 624 625 return err; 626 } 627 628 static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev) 629 { 630 u32 count = 0; 631 const struct cs_section_def *sect = NULL; 632 const struct cs_extent_def *ext = NULL; 633 634 count += 1; 635 636 for (sect = gfx12_cs_data; sect->section != NULL; ++sect) { 637 if (sect->id == SECT_CONTEXT) { 638 for (ext = sect->section; ext->extent != NULL; ++ext) 639 count += 2 + ext->reg_count; 640 } else 641 return 0; 642 } 643 644 return count; 645 } 646 647 static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, 648 volatile u32 *buffer) 649 { 650 u32 count = 0, clustercount = 0, i; 651 const struct cs_section_def *sect = NULL; 652 const struct cs_extent_def *ext = NULL; 653 654 if (adev->gfx.rlc.cs_data == NULL) 655 return; 656 if (buffer == NULL) 657 return; 658 659 count += 1; 660 661 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 662 if (sect->id == SECT_CONTEXT) { 663 for (ext = sect->section; ext->extent != NULL; ++ext) { 664 clustercount++; 665 buffer[count++] = ext->reg_count; 666 buffer[count++] = ext->reg_index; 667 668 for (i = 0; i < ext->reg_count; i++) 669 buffer[count++] = cpu_to_le32(ext->extent[i]); 670 } 671 } else 672 return; 673 } 674 675 buffer[0] = clustercount; 676 } 677 678 static void gfx_v12_0_rlc_fini(struct amdgpu_device *adev) 679 { 680 /* clear state block */ 681 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 682 &adev->gfx.rlc.clear_state_gpu_addr, 683 (void **)&adev->gfx.rlc.cs_ptr); 684 685 /* jump table block */ 686 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 687 &adev->gfx.rlc.cp_table_gpu_addr, 688 (void **)&adev->gfx.rlc.cp_table_ptr); 689 } 690 691 static void gfx_v12_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 692 { 693 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 694 695 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; 696 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 697 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); 698 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); 699 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3); 700 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL); 701 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX); 702 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0); 703 adev->gfx.rlc.rlcg_reg_access_supported = true; 704 } 705 706 static int gfx_v12_0_rlc_init(struct amdgpu_device *adev) 707 { 708 const struct cs_section_def *cs_data; 709 int r; 710 711 adev->gfx.rlc.cs_data = gfx12_cs_data; 712 713 cs_data = adev->gfx.rlc.cs_data; 714 715 if (cs_data) { 716 /* init clear state block */ 717 r = amdgpu_gfx_rlc_init_csb(adev); 718 if (r) 719 return r; 720 } 721 722 /* init spm vmid with 0xf */ 723 if (adev->gfx.rlc.funcs->update_spm_vmid) 724 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); 725 726 return 0; 727 } 728 729 static void gfx_v12_0_mec_fini(struct amdgpu_device *adev) 730 { 731 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 732 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 733 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); 734 } 735 736 static void gfx_v12_0_me_init(struct amdgpu_device *adev) 737 { 738 bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); 739 740 amdgpu_gfx_graphics_queue_acquire(adev); 741 } 742 743 static int gfx_v12_0_mec_init(struct amdgpu_device *adev) 744 { 745 int r; 746 u32 *hpd; 747 size_t mec_hpd_size; 748 749 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 750 751 /* take ownership of the relevant compute queues */ 752 amdgpu_gfx_compute_queue_acquire(adev); 753 mec_hpd_size = adev->gfx.num_compute_rings * GFX12_MEC_HPD_SIZE; 754 755 if (mec_hpd_size) { 756 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 757 AMDGPU_GEM_DOMAIN_GTT, 758 &adev->gfx.mec.hpd_eop_obj, 759 &adev->gfx.mec.hpd_eop_gpu_addr, 760 (void **)&hpd); 761 if (r) { 762 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 763 gfx_v12_0_mec_fini(adev); 764 return r; 765 } 766 767 memset(hpd, 0, mec_hpd_size); 768 769 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 770 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 771 } 772 773 return 0; 774 } 775 776 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address) 777 { 778 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 779 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 780 (address << SQ_IND_INDEX__INDEX__SHIFT)); 781 return RREG32_SOC15(GC, 0, regSQ_IND_DATA); 782 } 783 784 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, 785 uint32_t thread, uint32_t regno, 786 uint32_t num, uint32_t *out) 787 { 788 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 789 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 790 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 791 (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | 792 (SQ_IND_INDEX__AUTO_INCR_MASK)); 793 while (num--) 794 *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); 795 } 796 797 static void gfx_v12_0_read_wave_data(struct amdgpu_device *adev, 798 uint32_t xcc_id, 799 uint32_t simd, uint32_t wave, 800 uint32_t *dst, int *no_fields) 801 { 802 /* in gfx12 the SIMD_ID is specified as part of the INSTANCE 803 * field when performing a select_se_sh so it should be 804 * zero here */ 805 WARN_ON(simd != 0); 806 807 /* type 4 wave data */ 808 dst[(*no_fields)++] = 4; 809 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS); 810 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO); 811 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI); 812 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO); 813 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI); 814 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1); 815 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2); 816 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC); 817 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC); 818 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS); 819 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); 820 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); 821 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); 822 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); 823 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATE_PRIV); 824 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_PRIV); 825 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_USER); 826 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAP_CTRL); 827 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_ACTIVE); 828 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_VALID_AND_IDLE); 829 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_LO); 830 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_HI); 831 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_SCHED_MODE); 832 } 833 834 static void gfx_v12_0_read_wave_sgprs(struct amdgpu_device *adev, 835 uint32_t xcc_id, uint32_t simd, 836 uint32_t wave, uint32_t start, 837 uint32_t size, uint32_t *dst) 838 { 839 WARN_ON(simd != 0); 840 841 wave_read_regs( 842 adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, 843 dst); 844 } 845 846 static void gfx_v12_0_read_wave_vgprs(struct amdgpu_device *adev, 847 uint32_t xcc_id, uint32_t simd, 848 uint32_t wave, uint32_t thread, 849 uint32_t start, uint32_t size, 850 uint32_t *dst) 851 { 852 wave_read_regs( 853 adev, wave, thread, 854 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 855 } 856 857 static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev, 858 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 859 { 860 soc24_grbm_select(adev, me, pipe, q, vm); 861 } 862 863 static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = { 864 .get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter, 865 .select_se_sh = &gfx_v12_0_select_se_sh, 866 .read_wave_data = &gfx_v12_0_read_wave_data, 867 .read_wave_sgprs = &gfx_v12_0_read_wave_sgprs, 868 .read_wave_vgprs = &gfx_v12_0_read_wave_vgprs, 869 .select_me_pipe_q = &gfx_v12_0_select_me_pipe_q, 870 .update_perfmon_mgcg = &gfx_v12_0_update_perf_clk, 871 }; 872 873 static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev) 874 { 875 876 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 877 case IP_VERSION(12, 0, 0): 878 case IP_VERSION(12, 0, 1): 879 adev->gfx.config.max_hw_contexts = 8; 880 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 881 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 882 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 883 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 884 break; 885 default: 886 BUG(); 887 break; 888 } 889 890 return 0; 891 } 892 893 static int gfx_v12_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, 894 int me, int pipe, int queue) 895 { 896 int r; 897 struct amdgpu_ring *ring; 898 unsigned int irq_type; 899 900 ring = &adev->gfx.gfx_ring[ring_id]; 901 902 ring->me = me; 903 ring->pipe = pipe; 904 ring->queue = queue; 905 906 ring->ring_obj = NULL; 907 ring->use_doorbell = true; 908 909 if (!ring_id) 910 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; 911 else 912 ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; 913 ring->vm_hub = AMDGPU_GFXHUB(0); 914 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); 915 916 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; 917 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 918 AMDGPU_RING_PRIO_DEFAULT, NULL); 919 if (r) 920 return r; 921 return 0; 922 } 923 924 static int gfx_v12_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 925 int mec, int pipe, int queue) 926 { 927 int r; 928 unsigned irq_type; 929 struct amdgpu_ring *ring; 930 unsigned int hw_prio; 931 932 ring = &adev->gfx.compute_ring[ring_id]; 933 934 /* mec0 is me1 */ 935 ring->me = mec + 1; 936 ring->pipe = pipe; 937 ring->queue = queue; 938 939 ring->ring_obj = NULL; 940 ring->use_doorbell = true; 941 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; 942 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 943 + (ring_id * GFX12_MEC_HPD_SIZE); 944 ring->vm_hub = AMDGPU_GFXHUB(0); 945 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 946 947 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 948 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 949 + ring->pipe; 950 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 951 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 952 /* type-2 packets are deprecated on MEC, use type-3 instead */ 953 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 954 hw_prio, NULL); 955 if (r) 956 return r; 957 958 return 0; 959 } 960 961 static struct { 962 SOC24_FIRMWARE_ID id; 963 unsigned int offset; 964 unsigned int size; 965 unsigned int size_x16; 966 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX]; 967 968 #define RLC_TOC_OFFSET_DWUNIT 8 969 #define RLC_SIZE_MULTIPLE 1024 970 #define RLC_TOC_UMF_SIZE_inM 23ULL 971 #define RLC_TOC_FORMAT_API 165ULL 972 973 static void gfx_v12_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) 974 { 975 RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc; 976 977 while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) { 978 rlc_autoload_info[ucode->id].id = ucode->id; 979 rlc_autoload_info[ucode->id].offset = 980 ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4; 981 rlc_autoload_info[ucode->id].size = 982 ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 : 983 ucode->size * 4; 984 ucode++; 985 } 986 } 987 988 static uint32_t gfx_v12_0_calc_toc_total_size(struct amdgpu_device *adev) 989 { 990 uint32_t total_size = 0; 991 SOC24_FIRMWARE_ID id; 992 993 gfx_v12_0_parse_rlc_toc(adev, adev->psp.toc.start_addr); 994 995 for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++) 996 total_size += rlc_autoload_info[id].size; 997 998 /* In case the offset in rlc toc ucode is aligned */ 999 if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset) 1000 total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset + 1001 rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size; 1002 if (total_size < (RLC_TOC_UMF_SIZE_inM << 20)) 1003 total_size = RLC_TOC_UMF_SIZE_inM << 20; 1004 1005 return total_size; 1006 } 1007 1008 static int gfx_v12_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) 1009 { 1010 int r; 1011 uint32_t total_size; 1012 1013 total_size = gfx_v12_0_calc_toc_total_size(adev); 1014 1015 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 1016 AMDGPU_GEM_DOMAIN_VRAM, 1017 &adev->gfx.rlc.rlc_autoload_bo, 1018 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1019 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1020 1021 if (r) { 1022 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 1023 return r; 1024 } 1025 1026 return 0; 1027 } 1028 1029 static void gfx_v12_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 1030 SOC24_FIRMWARE_ID id, 1031 const void *fw_data, 1032 uint32_t fw_size) 1033 { 1034 uint32_t toc_offset; 1035 uint32_t toc_fw_size; 1036 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 1037 1038 if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX) 1039 return; 1040 1041 toc_offset = rlc_autoload_info[id].offset; 1042 toc_fw_size = rlc_autoload_info[id].size; 1043 1044 if (fw_size == 0) 1045 fw_size = toc_fw_size; 1046 1047 if (fw_size > toc_fw_size) 1048 fw_size = toc_fw_size; 1049 1050 memcpy(ptr + toc_offset, fw_data, fw_size); 1051 1052 if (fw_size < toc_fw_size) 1053 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); 1054 } 1055 1056 static void 1057 gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev) 1058 { 1059 void *data; 1060 uint32_t size; 1061 uint32_t *toc_ptr; 1062 1063 data = adev->psp.toc.start_addr; 1064 size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size; 1065 1066 toc_ptr = (uint32_t *)data + size / 4 - 2; 1067 *toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1; 1068 1069 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC, 1070 data, size); 1071 } 1072 1073 static void 1074 gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev) 1075 { 1076 const __le32 *fw_data; 1077 uint32_t fw_size; 1078 const struct gfx_firmware_header_v2_0 *cpv2_hdr; 1079 const struct rlc_firmware_header_v2_0 *rlc_hdr; 1080 const struct rlc_firmware_header_v2_1 *rlcv21_hdr; 1081 const struct rlc_firmware_header_v2_2 *rlcv22_hdr; 1082 uint16_t version_major, version_minor; 1083 1084 /* pfp ucode */ 1085 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1086 adev->gfx.pfp_fw->data; 1087 /* instruction */ 1088 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1089 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1090 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1091 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP, 1092 fw_data, fw_size); 1093 /* data */ 1094 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1095 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1096 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1097 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P0_STACK, 1098 fw_data, fw_size); 1099 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P1_STACK, 1100 fw_data, fw_size); 1101 /* me ucode */ 1102 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1103 adev->gfx.me_fw->data; 1104 /* instruction */ 1105 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1106 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1107 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1108 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME, 1109 fw_data, fw_size); 1110 /* data */ 1111 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1112 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1113 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1114 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P0_STACK, 1115 fw_data, fw_size); 1116 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P1_STACK, 1117 fw_data, fw_size); 1118 /* mec ucode */ 1119 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1120 adev->gfx.mec_fw->data; 1121 /* instruction */ 1122 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1123 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1124 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1125 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC, 1126 fw_data, fw_size); 1127 /* data */ 1128 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1129 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1130 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1131 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK, 1132 fw_data, fw_size); 1133 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK, 1134 fw_data, fw_size); 1135 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK, 1136 fw_data, fw_size); 1137 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK, 1138 fw_data, fw_size); 1139 1140 /* rlc ucode */ 1141 rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 1142 adev->gfx.rlc_fw->data; 1143 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1144 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1145 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1146 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE, 1147 fw_data, fw_size); 1148 1149 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 1150 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 1151 if (version_major == 2) { 1152 if (version_minor >= 1) { 1153 rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; 1154 1155 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1156 le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes)); 1157 fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes); 1158 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH, 1159 fw_data, fw_size); 1160 1161 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1162 le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes)); 1163 fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes); 1164 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM, 1165 fw_data, fw_size); 1166 } 1167 if (version_minor >= 2) { 1168 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1169 1170 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1171 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); 1172 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); 1173 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE, 1174 fw_data, fw_size); 1175 1176 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1177 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); 1178 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); 1179 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT, 1180 fw_data, fw_size); 1181 } 1182 } 1183 } 1184 1185 static void 1186 gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev) 1187 { 1188 const __le32 *fw_data; 1189 uint32_t fw_size; 1190 const struct sdma_firmware_header_v3_0 *sdma_hdr; 1191 1192 sdma_hdr = (const struct sdma_firmware_header_v3_0 *) 1193 adev->sdma.instance[0].fw->data; 1194 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1195 le32_to_cpu(sdma_hdr->ucode_offset_bytes)); 1196 fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes); 1197 1198 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0, 1199 fw_data, fw_size); 1200 } 1201 1202 static void 1203 gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev) 1204 { 1205 const __le32 *fw_data; 1206 unsigned fw_size; 1207 const struct mes_firmware_header_v1_0 *mes_hdr; 1208 int pipe, ucode_id, data_id; 1209 1210 for (pipe = 0; pipe < 2; pipe++) { 1211 if (pipe == 0) { 1212 ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0; 1213 data_id = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK; 1214 } else { 1215 ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1; 1216 data_id = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK; 1217 } 1218 1219 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1220 adev->mes.fw[pipe]->data; 1221 1222 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1223 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1224 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1225 1226 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size); 1227 1228 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1229 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1230 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1231 1232 gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size); 1233 } 1234 } 1235 1236 static int gfx_v12_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1237 { 1238 uint32_t rlc_g_offset, rlc_g_size; 1239 uint64_t gpu_addr; 1240 uint32_t data; 1241 1242 /* RLC autoload sequence 2: copy ucode */ 1243 gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(adev); 1244 gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(adev); 1245 gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(adev); 1246 gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(adev); 1247 1248 rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset; 1249 rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size; 1250 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start; 1251 1252 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); 1253 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); 1254 1255 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); 1256 1257 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 1258 /* RLC autoload sequence 3: load IMU fw */ 1259 if (adev->gfx.imu.funcs->load_microcode) 1260 adev->gfx.imu.funcs->load_microcode(adev); 1261 /* RLC autoload sequence 4 init IMU fw */ 1262 if (adev->gfx.imu.funcs->setup_imu) 1263 adev->gfx.imu.funcs->setup_imu(adev); 1264 if (adev->gfx.imu.funcs->start_imu) 1265 adev->gfx.imu.funcs->start_imu(adev); 1266 1267 /* RLC autoload sequence 5 disable gpa mode */ 1268 gfx_v12_0_disable_gpa_mode(adev); 1269 } else { 1270 /* unhalt rlc to start autoload without imu */ 1271 data = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 1272 data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1); 1273 data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 1274 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, data); 1275 WREG32_SOC15(GC, 0, regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK); 1276 } 1277 1278 return 0; 1279 } 1280 1281 static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev) 1282 { 1283 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); 1284 uint32_t *ptr; 1285 uint32_t inst; 1286 1287 ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); 1288 if (!ptr) { 1289 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); 1290 adev->gfx.ip_dump_core = NULL; 1291 } else { 1292 adev->gfx.ip_dump_core = ptr; 1293 } 1294 1295 /* Allocate memory for compute queue registers for all the instances */ 1296 reg_count = ARRAY_SIZE(gc_cp_reg_list_12); 1297 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 1298 adev->gfx.mec.num_queue_per_pipe; 1299 1300 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1301 if (!ptr) { 1302 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); 1303 adev->gfx.ip_dump_compute_queues = NULL; 1304 } else { 1305 adev->gfx.ip_dump_compute_queues = ptr; 1306 } 1307 1308 /* Allocate memory for gfx queue registers for all the instances */ 1309 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 1310 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me * 1311 adev->gfx.me.num_queue_per_pipe; 1312 1313 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1314 if (!ptr) { 1315 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); 1316 adev->gfx.ip_dump_gfx_queues = NULL; 1317 } else { 1318 adev->gfx.ip_dump_gfx_queues = ptr; 1319 } 1320 } 1321 1322 static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) 1323 { 1324 int i, j, k, r, ring_id = 0; 1325 unsigned num_compute_rings; 1326 int xcc_id = 0; 1327 struct amdgpu_device *adev = ip_block->adev; 1328 1329 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1330 case IP_VERSION(12, 0, 0): 1331 case IP_VERSION(12, 0, 1): 1332 adev->gfx.me.num_me = 1; 1333 adev->gfx.me.num_pipe_per_me = 1; 1334 adev->gfx.me.num_queue_per_pipe = 1; 1335 adev->gfx.mec.num_mec = 2; 1336 adev->gfx.mec.num_pipe_per_mec = 2; 1337 adev->gfx.mec.num_queue_per_pipe = 4; 1338 break; 1339 default: 1340 adev->gfx.me.num_me = 1; 1341 adev->gfx.me.num_pipe_per_me = 1; 1342 adev->gfx.me.num_queue_per_pipe = 1; 1343 adev->gfx.mec.num_mec = 1; 1344 adev->gfx.mec.num_pipe_per_mec = 4; 1345 adev->gfx.mec.num_queue_per_pipe = 8; 1346 break; 1347 } 1348 1349 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1350 default: 1351 adev->gfx.enable_cleaner_shader = false; 1352 break; 1353 } 1354 1355 /* recalculate compute rings to use based on hardware configuration */ 1356 num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * 1357 adev->gfx.mec.num_queue_per_pipe) / 2; 1358 adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, 1359 num_compute_rings); 1360 1361 /* EOP Event */ 1362 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1363 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1364 &adev->gfx.eop_irq); 1365 if (r) 1366 return r; 1367 1368 /* Bad opcode Event */ 1369 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1370 GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, 1371 &adev->gfx.bad_op_irq); 1372 if (r) 1373 return r; 1374 1375 /* Privileged reg */ 1376 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1377 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1378 &adev->gfx.priv_reg_irq); 1379 if (r) 1380 return r; 1381 1382 /* Privileged inst */ 1383 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1384 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1385 &adev->gfx.priv_inst_irq); 1386 if (r) 1387 return r; 1388 1389 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1390 1391 gfx_v12_0_me_init(adev); 1392 1393 r = gfx_v12_0_rlc_init(adev); 1394 if (r) { 1395 dev_err(adev->dev, "Failed to init rlc BOs!\n"); 1396 return r; 1397 } 1398 1399 r = gfx_v12_0_mec_init(adev); 1400 if (r) { 1401 dev_err(adev->dev, "Failed to init MEC BOs!\n"); 1402 return r; 1403 } 1404 1405 /* set up the gfx ring */ 1406 for (i = 0; i < adev->gfx.me.num_me; i++) { 1407 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 1408 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1409 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1410 continue; 1411 1412 r = gfx_v12_0_gfx_ring_init(adev, ring_id, 1413 i, k, j); 1414 if (r) 1415 return r; 1416 ring_id++; 1417 } 1418 } 1419 } 1420 1421 ring_id = 0; 1422 /* set up the compute queues - allocate horizontally across pipes */ 1423 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1424 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1425 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1426 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 1427 0, i, k, j)) 1428 continue; 1429 1430 r = gfx_v12_0_compute_ring_init(adev, ring_id, 1431 i, k, j); 1432 if (r) 1433 return r; 1434 1435 ring_id++; 1436 } 1437 } 1438 } 1439 1440 if (!adev->enable_mes_kiq) { 1441 r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0); 1442 if (r) { 1443 dev_err(adev->dev, "Failed to init KIQ BOs!\n"); 1444 return r; 1445 } 1446 1447 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1448 if (r) 1449 return r; 1450 } 1451 1452 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_compute_mqd), 0); 1453 if (r) 1454 return r; 1455 1456 /* allocate visible FB for rlc auto-loading fw */ 1457 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1458 r = gfx_v12_0_rlc_autoload_buffer_init(adev); 1459 if (r) 1460 return r; 1461 } 1462 1463 r = gfx_v12_0_gpu_early_init(adev); 1464 if (r) 1465 return r; 1466 1467 gfx_v12_0_alloc_ip_dump(adev); 1468 1469 r = amdgpu_gfx_sysfs_isolation_shader_init(adev); 1470 if (r) 1471 return r; 1472 1473 return 0; 1474 } 1475 1476 static void gfx_v12_0_pfp_fini(struct amdgpu_device *adev) 1477 { 1478 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1479 &adev->gfx.pfp.pfp_fw_gpu_addr, 1480 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1481 1482 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1483 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1484 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1485 } 1486 1487 static void gfx_v12_0_me_fini(struct amdgpu_device *adev) 1488 { 1489 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1490 &adev->gfx.me.me_fw_gpu_addr, 1491 (void **)&adev->gfx.me.me_fw_ptr); 1492 1493 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1494 &adev->gfx.me.me_fw_data_gpu_addr, 1495 (void **)&adev->gfx.me.me_fw_data_ptr); 1496 } 1497 1498 static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1499 { 1500 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1501 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1502 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1503 } 1504 1505 static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) 1506 { 1507 int i; 1508 struct amdgpu_device *adev = ip_block->adev; 1509 1510 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1511 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1512 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1513 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1514 1515 amdgpu_gfx_mqd_sw_fini(adev, 0); 1516 1517 if (!adev->enable_mes_kiq) { 1518 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); 1519 amdgpu_gfx_kiq_fini(adev, 0); 1520 } 1521 1522 gfx_v12_0_pfp_fini(adev); 1523 gfx_v12_0_me_fini(adev); 1524 gfx_v12_0_rlc_fini(adev); 1525 gfx_v12_0_mec_fini(adev); 1526 1527 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1528 gfx_v12_0_rlc_autoload_buffer_fini(adev); 1529 1530 gfx_v12_0_free_microcode(adev); 1531 1532 amdgpu_gfx_sysfs_isolation_shader_fini(adev); 1533 1534 kfree(adev->gfx.ip_dump_core); 1535 kfree(adev->gfx.ip_dump_compute_queues); 1536 kfree(adev->gfx.ip_dump_gfx_queues); 1537 1538 return 0; 1539 } 1540 1541 static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1542 u32 sh_num, u32 instance, int xcc_id) 1543 { 1544 u32 data; 1545 1546 if (instance == 0xffffffff) 1547 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1548 INSTANCE_BROADCAST_WRITES, 1); 1549 else 1550 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1551 instance); 1552 1553 if (se_num == 0xffffffff) 1554 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1555 1); 1556 else 1557 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1558 1559 if (sh_num == 0xffffffff) 1560 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1561 1); 1562 else 1563 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1564 1565 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1566 } 1567 1568 static u32 gfx_v12_0_get_sa_active_bitmap(struct amdgpu_device *adev) 1569 { 1570 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask; 1571 1572 gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_CC_GC_SA_UNIT_DISABLE); 1573 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask, 1574 GRBM_CC_GC_SA_UNIT_DISABLE, 1575 SA_DISABLE); 1576 gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_GC_USER_SA_UNIT_DISABLE); 1577 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask, 1578 GRBM_GC_USER_SA_UNIT_DISABLE, 1579 SA_DISABLE); 1580 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * 1581 adev->gfx.config.max_shader_engines); 1582 1583 return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask)); 1584 } 1585 1586 static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1587 { 1588 u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask; 1589 u32 rb_mask; 1590 1591 gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1592 gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask, 1593 CC_RB_BACKEND_DISABLE, 1594 BACKEND_DISABLE); 1595 gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 1596 gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask, 1597 GC_USER_RB_BACKEND_DISABLE, 1598 BACKEND_DISABLE); 1599 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se * 1600 adev->gfx.config.max_shader_engines); 1601 1602 return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask)); 1603 } 1604 1605 static void gfx_v12_0_setup_rb(struct amdgpu_device *adev) 1606 { 1607 u32 rb_bitmap_width_per_sa; 1608 u32 max_sa; 1609 u32 active_sa_bitmap; 1610 u32 global_active_rb_bitmap; 1611 u32 active_rb_bitmap = 0; 1612 u32 i; 1613 1614 /* query sa bitmap from SA_UNIT_DISABLE registers */ 1615 active_sa_bitmap = gfx_v12_0_get_sa_active_bitmap(adev); 1616 /* query rb bitmap from RB_BACKEND_DISABLE registers */ 1617 global_active_rb_bitmap = gfx_v12_0_get_rb_active_bitmap(adev); 1618 1619 /* generate active rb bitmap according to active sa bitmap */ 1620 max_sa = adev->gfx.config.max_shader_engines * 1621 adev->gfx.config.max_sh_per_se; 1622 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se / 1623 adev->gfx.config.max_sh_per_se; 1624 for (i = 0; i < max_sa; i++) { 1625 if (active_sa_bitmap & (1 << i)) 1626 active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa)); 1627 } 1628 1629 active_rb_bitmap |= global_active_rb_bitmap; 1630 adev->gfx.config.backend_enable_mask = active_rb_bitmap; 1631 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); 1632 } 1633 1634 #define LDS_APP_BASE 0x1 1635 #define SCRATCH_APP_BASE 0x2 1636 1637 static void gfx_v12_0_init_compute_vmid(struct amdgpu_device *adev) 1638 { 1639 int i; 1640 uint32_t sh_mem_bases; 1641 uint32_t data; 1642 1643 /* 1644 * Configure apertures: 1645 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1646 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1647 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1648 */ 1649 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | 1650 SCRATCH_APP_BASE; 1651 1652 mutex_lock(&adev->srbm_mutex); 1653 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1654 soc24_grbm_select(adev, 0, 0, 0, i); 1655 /* CP and shaders */ 1656 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1657 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); 1658 1659 /* Enable trap for each kfd vmid. */ 1660 data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); 1661 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 1662 WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); 1663 } 1664 soc24_grbm_select(adev, 0, 0, 0, 0); 1665 mutex_unlock(&adev->srbm_mutex); 1666 } 1667 1668 static void gfx_v12_0_tcp_harvest(struct amdgpu_device *adev) 1669 { 1670 /* TODO: harvest feature to be added later. */ 1671 } 1672 1673 static void gfx_v12_0_get_tcc_info(struct amdgpu_device *adev) 1674 { 1675 } 1676 1677 static void gfx_v12_0_constants_init(struct amdgpu_device *adev) 1678 { 1679 u32 tmp; 1680 int i; 1681 1682 if (!amdgpu_sriov_vf(adev)) 1683 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 1684 1685 gfx_v12_0_setup_rb(adev); 1686 gfx_v12_0_get_cu_info(adev, &adev->gfx.cu_info); 1687 gfx_v12_0_get_tcc_info(adev); 1688 adev->gfx.config.pa_sc_tile_steering_override = 0; 1689 1690 /* XXX SH_MEM regs */ 1691 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1692 mutex_lock(&adev->srbm_mutex); 1693 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { 1694 soc24_grbm_select(adev, 0, 0, 0, i); 1695 /* CP and shaders */ 1696 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1697 if (i != 0) { 1698 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1699 (adev->gmc.private_aperture_start >> 48)); 1700 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1701 (adev->gmc.shared_aperture_start >> 48)); 1702 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); 1703 } 1704 } 1705 soc24_grbm_select(adev, 0, 0, 0, 0); 1706 1707 mutex_unlock(&adev->srbm_mutex); 1708 1709 gfx_v12_0_init_compute_vmid(adev); 1710 } 1711 1712 static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev, 1713 int me, int pipe) 1714 { 1715 if (me != 0) 1716 return 0; 1717 1718 switch (pipe) { 1719 case 0: 1720 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 1721 default: 1722 return 0; 1723 } 1724 } 1725 1726 static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev, 1727 int me, int pipe) 1728 { 1729 /* 1730 * amdgpu controls only the first MEC. That's why this function only 1731 * handles the setting of interrupts for this specific MEC. All other 1732 * pipes' interrupts are set by amdkfd. 1733 */ 1734 if (me != 1) 1735 return 0; 1736 1737 switch (pipe) { 1738 case 0: 1739 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 1740 case 1: 1741 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 1742 default: 1743 return 0; 1744 } 1745 } 1746 1747 static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 1748 bool enable) 1749 { 1750 u32 tmp, cp_int_cntl_reg; 1751 int i, j; 1752 1753 if (amdgpu_sriov_vf(adev)) 1754 return; 1755 1756 for (i = 0; i < adev->gfx.me.num_me; i++) { 1757 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 1758 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 1759 1760 if (cp_int_cntl_reg) { 1761 tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 1762 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1763 enable ? 1 : 0); 1764 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1765 enable ? 1 : 0); 1766 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1767 enable ? 1 : 0); 1768 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1769 enable ? 1 : 0); 1770 WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); 1771 } 1772 } 1773 } 1774 } 1775 1776 static int gfx_v12_0_init_csb(struct amdgpu_device *adev) 1777 { 1778 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 1779 1780 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 1781 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1782 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 1783 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1784 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 1785 1786 return 0; 1787 } 1788 1789 static void gfx_v12_0_rlc_stop(struct amdgpu_device *adev) 1790 { 1791 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 1792 1793 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 1794 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 1795 } 1796 1797 static void gfx_v12_0_rlc_reset(struct amdgpu_device *adev) 1798 { 1799 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 1800 udelay(50); 1801 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 1802 udelay(50); 1803 } 1804 1805 static void gfx_v12_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 1806 bool enable) 1807 { 1808 uint32_t rlc_pg_cntl; 1809 1810 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 1811 1812 if (!enable) { 1813 /* RLC_PG_CNTL[23] = 0 (default) 1814 * RLC will wait for handshake acks with SMU 1815 * GFXOFF will be enabled 1816 * RLC_PG_CNTL[23] = 1 1817 * RLC will not issue any message to SMU 1818 * hence no handshake between SMU & RLC 1819 * GFXOFF will be disabled 1820 */ 1821 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1822 } else 1823 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1824 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 1825 } 1826 1827 static void gfx_v12_0_rlc_start(struct amdgpu_device *adev) 1828 { 1829 /* TODO: enable rlc & smu handshake until smu 1830 * and gfxoff feature works as expected */ 1831 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 1832 gfx_v12_0_rlc_smu_handshake_cntl(adev, false); 1833 1834 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 1835 udelay(50); 1836 } 1837 1838 static void gfx_v12_0_rlc_enable_srm(struct amdgpu_device *adev) 1839 { 1840 uint32_t tmp; 1841 1842 /* enable Save Restore Machine */ 1843 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 1844 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 1845 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 1846 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 1847 } 1848 1849 static void gfx_v12_0_load_rlcg_microcode(struct amdgpu_device *adev) 1850 { 1851 const struct rlc_firmware_header_v2_0 *hdr; 1852 const __le32 *fw_data; 1853 unsigned i, fw_size; 1854 1855 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1856 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1857 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1858 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 1859 1860 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 1861 RLCG_UCODE_LOADING_START_ADDRESS); 1862 1863 for (i = 0; i < fw_size; i++) 1864 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 1865 le32_to_cpup(fw_data++)); 1866 1867 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 1868 } 1869 1870 static void gfx_v12_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 1871 { 1872 const struct rlc_firmware_header_v2_2 *hdr; 1873 const __le32 *fw_data; 1874 unsigned i, fw_size; 1875 u32 tmp; 1876 1877 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1878 1879 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1880 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 1881 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 1882 1883 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 1884 1885 for (i = 0; i < fw_size; i++) { 1886 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1887 msleep(1); 1888 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 1889 le32_to_cpup(fw_data++)); 1890 } 1891 1892 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1893 1894 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1895 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 1896 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 1897 1898 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 1899 for (i = 0; i < fw_size; i++) { 1900 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1901 msleep(1); 1902 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 1903 le32_to_cpup(fw_data++)); 1904 } 1905 1906 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1907 1908 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 1909 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 1910 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 1911 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 1912 } 1913 1914 static int gfx_v12_0_rlc_load_microcode(struct amdgpu_device *adev) 1915 { 1916 const struct rlc_firmware_header_v2_0 *hdr; 1917 uint16_t version_major; 1918 uint16_t version_minor; 1919 1920 if (!adev->gfx.rlc_fw) 1921 return -EINVAL; 1922 1923 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1924 amdgpu_ucode_print_rlc_hdr(&hdr->header); 1925 1926 version_major = le16_to_cpu(hdr->header.header_version_major); 1927 version_minor = le16_to_cpu(hdr->header.header_version_minor); 1928 1929 if (version_major == 2) { 1930 gfx_v12_0_load_rlcg_microcode(adev); 1931 if (amdgpu_dpm == 1) { 1932 if (version_minor >= 2) 1933 gfx_v12_0_load_rlc_iram_dram_microcode(adev); 1934 } 1935 1936 return 0; 1937 } 1938 1939 return -EINVAL; 1940 } 1941 1942 static int gfx_v12_0_rlc_resume(struct amdgpu_device *adev) 1943 { 1944 int r; 1945 1946 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1947 gfx_v12_0_init_csb(adev); 1948 1949 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 1950 gfx_v12_0_rlc_enable_srm(adev); 1951 } else { 1952 if (amdgpu_sriov_vf(adev)) { 1953 gfx_v12_0_init_csb(adev); 1954 return 0; 1955 } 1956 1957 adev->gfx.rlc.funcs->stop(adev); 1958 1959 /* disable CG */ 1960 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 1961 1962 /* disable PG */ 1963 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 1964 1965 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 1966 /* legacy rlc firmware loading */ 1967 r = gfx_v12_0_rlc_load_microcode(adev); 1968 if (r) 1969 return r; 1970 } 1971 1972 gfx_v12_0_init_csb(adev); 1973 1974 adev->gfx.rlc.funcs->start(adev); 1975 } 1976 1977 return 0; 1978 } 1979 1980 static void gfx_v12_0_config_gfx_rs64(struct amdgpu_device *adev) 1981 { 1982 const struct gfx_firmware_header_v2_0 *pfp_hdr; 1983 const struct gfx_firmware_header_v2_0 *me_hdr; 1984 const struct gfx_firmware_header_v2_0 *mec_hdr; 1985 uint32_t pipe_id, tmp; 1986 1987 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 1988 adev->gfx.mec_fw->data; 1989 me_hdr = (const struct gfx_firmware_header_v2_0 *) 1990 adev->gfx.me_fw->data; 1991 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 1992 adev->gfx.pfp_fw->data; 1993 1994 /* config pfp program start addr */ 1995 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 1996 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 1997 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 1998 (pfp_hdr->ucode_start_addr_hi << 30) | 1999 (pfp_hdr->ucode_start_addr_lo >> 2)); 2000 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2001 pfp_hdr->ucode_start_addr_hi >> 2); 2002 } 2003 soc24_grbm_select(adev, 0, 0, 0, 0); 2004 2005 /* reset pfp pipe */ 2006 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2007 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2008 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2009 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2010 2011 /* clear pfp pipe reset */ 2012 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2013 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2014 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2015 2016 /* config me program start addr */ 2017 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2018 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2019 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2020 (me_hdr->ucode_start_addr_hi << 30) | 2021 (me_hdr->ucode_start_addr_lo >> 2)); 2022 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2023 me_hdr->ucode_start_addr_hi>>2); 2024 } 2025 soc24_grbm_select(adev, 0, 0, 0, 0); 2026 2027 /* reset me pipe */ 2028 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2029 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2030 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2031 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2032 2033 /* clear me pipe reset */ 2034 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2035 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2036 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2037 2038 /* config mec program start addr */ 2039 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2040 soc24_grbm_select(adev, 1, pipe_id, 0, 0); 2041 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2042 mec_hdr->ucode_start_addr_lo >> 2 | 2043 mec_hdr->ucode_start_addr_hi << 30); 2044 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2045 mec_hdr->ucode_start_addr_hi >> 2); 2046 } 2047 soc24_grbm_select(adev, 0, 0, 0, 0); 2048 2049 /* reset mec pipe */ 2050 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2051 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 2052 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 2053 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 2054 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 2055 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2056 2057 /* clear mec pipe reset */ 2058 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 2059 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 2060 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 2061 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 2062 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2063 } 2064 2065 static void gfx_v12_0_set_pfp_ucode_start_addr(struct amdgpu_device *adev) 2066 { 2067 const struct gfx_firmware_header_v2_0 *cp_hdr; 2068 unsigned pipe_id, tmp; 2069 2070 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 2071 adev->gfx.pfp_fw->data; 2072 mutex_lock(&adev->srbm_mutex); 2073 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2074 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2075 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2076 (cp_hdr->ucode_start_addr_hi << 30) | 2077 (cp_hdr->ucode_start_addr_lo >> 2)); 2078 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2079 cp_hdr->ucode_start_addr_hi>>2); 2080 2081 /* 2082 * Program CP_ME_CNTL to reset given PIPE to take 2083 * effect of CP_PFP_PRGRM_CNTR_START. 2084 */ 2085 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2086 if (pipe_id == 0) 2087 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2088 PFP_PIPE0_RESET, 1); 2089 else 2090 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2091 PFP_PIPE1_RESET, 1); 2092 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2093 2094 /* Clear pfp pipe0 reset bit. */ 2095 if (pipe_id == 0) 2096 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2097 PFP_PIPE0_RESET, 0); 2098 else 2099 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2100 PFP_PIPE1_RESET, 0); 2101 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2102 } 2103 soc24_grbm_select(adev, 0, 0, 0, 0); 2104 mutex_unlock(&adev->srbm_mutex); 2105 } 2106 2107 static void gfx_v12_0_set_me_ucode_start_addr(struct amdgpu_device *adev) 2108 { 2109 const struct gfx_firmware_header_v2_0 *cp_hdr; 2110 unsigned pipe_id, tmp; 2111 2112 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 2113 adev->gfx.me_fw->data; 2114 mutex_lock(&adev->srbm_mutex); 2115 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2116 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2117 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2118 (cp_hdr->ucode_start_addr_hi << 30) | 2119 (cp_hdr->ucode_start_addr_lo >> 2) ); 2120 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2121 cp_hdr->ucode_start_addr_hi>>2); 2122 2123 /* 2124 * Program CP_ME_CNTL to reset given PIPE to take 2125 * effect of CP_ME_PRGRM_CNTR_START. 2126 */ 2127 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2128 if (pipe_id == 0) 2129 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2130 ME_PIPE0_RESET, 1); 2131 else 2132 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2133 ME_PIPE1_RESET, 1); 2134 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2135 2136 /* Clear pfp pipe0 reset bit. */ 2137 if (pipe_id == 0) 2138 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2139 ME_PIPE0_RESET, 0); 2140 else 2141 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2142 ME_PIPE1_RESET, 0); 2143 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2144 } 2145 soc24_grbm_select(adev, 0, 0, 0, 0); 2146 mutex_unlock(&adev->srbm_mutex); 2147 } 2148 2149 static void gfx_v12_0_set_mec_ucode_start_addr(struct amdgpu_device *adev) 2150 { 2151 const struct gfx_firmware_header_v2_0 *cp_hdr; 2152 unsigned pipe_id; 2153 2154 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 2155 adev->gfx.mec_fw->data; 2156 mutex_lock(&adev->srbm_mutex); 2157 for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) { 2158 soc24_grbm_select(adev, 1, pipe_id, 0, 0); 2159 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2160 cp_hdr->ucode_start_addr_lo >> 2 | 2161 cp_hdr->ucode_start_addr_hi << 30); 2162 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2163 cp_hdr->ucode_start_addr_hi >> 2); 2164 } 2165 soc24_grbm_select(adev, 0, 0, 0, 0); 2166 mutex_unlock(&adev->srbm_mutex); 2167 } 2168 2169 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 2170 { 2171 uint32_t cp_status; 2172 uint32_t bootload_status; 2173 int i; 2174 2175 for (i = 0; i < adev->usec_timeout; i++) { 2176 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2177 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 2178 2179 if ((cp_status == 0) && 2180 (REG_GET_FIELD(bootload_status, 2181 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 2182 break; 2183 } 2184 udelay(1); 2185 if (amdgpu_emu_mode) 2186 msleep(10); 2187 } 2188 2189 if (i >= adev->usec_timeout) { 2190 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 2191 return -ETIMEDOUT; 2192 } 2193 2194 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2195 gfx_v12_0_set_pfp_ucode_start_addr(adev); 2196 gfx_v12_0_set_me_ucode_start_addr(adev); 2197 gfx_v12_0_set_mec_ucode_start_addr(adev); 2198 } 2199 2200 return 0; 2201 } 2202 2203 static int gfx_v12_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2204 { 2205 int i; 2206 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2207 2208 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2209 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2210 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2211 2212 for (i = 0; i < adev->usec_timeout; i++) { 2213 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 2214 break; 2215 udelay(1); 2216 } 2217 2218 if (i >= adev->usec_timeout) 2219 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); 2220 2221 return 0; 2222 } 2223 2224 static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 2225 { 2226 int r; 2227 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2228 const __le32 *fw_ucode, *fw_data; 2229 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2230 uint32_t tmp; 2231 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2232 2233 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2234 adev->gfx.pfp_fw->data; 2235 2236 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2237 2238 /* instruction */ 2239 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 2240 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 2241 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 2242 /* data */ 2243 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2244 le32_to_cpu(pfp_hdr->data_offset_bytes)); 2245 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 2246 2247 /* 64kb align */ 2248 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2249 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2250 &adev->gfx.pfp.pfp_fw_obj, 2251 &adev->gfx.pfp.pfp_fw_gpu_addr, 2252 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2253 if (r) { 2254 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 2255 gfx_v12_0_pfp_fini(adev); 2256 return r; 2257 } 2258 2259 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2260 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2261 &adev->gfx.pfp.pfp_fw_data_obj, 2262 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 2263 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 2264 if (r) { 2265 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 2266 gfx_v12_0_pfp_fini(adev); 2267 return r; 2268 } 2269 2270 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 2271 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 2272 2273 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2274 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 2275 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2276 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 2277 2278 if (amdgpu_emu_mode == 1) 2279 adev->hdp.funcs->flush_hdp(adev, NULL); 2280 2281 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2282 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2283 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2284 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2285 2286 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2287 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2288 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2289 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2290 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2291 2292 /* 2293 * Programming any of the CP_PFP_IC_BASE registers 2294 * forces invalidation of the ME L1 I$. Wait for the 2295 * invalidation complete 2296 */ 2297 for (i = 0; i < usec_timeout; i++) { 2298 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2299 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2300 INVALIDATE_CACHE_COMPLETE)) 2301 break; 2302 udelay(1); 2303 } 2304 2305 if (i >= usec_timeout) { 2306 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2307 return -EINVAL; 2308 } 2309 2310 /* Prime the L1 instruction caches */ 2311 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2312 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2313 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2314 /* Waiting for cache primed*/ 2315 for (i = 0; i < usec_timeout; i++) { 2316 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2317 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2318 ICACHE_PRIMED)) 2319 break; 2320 udelay(1); 2321 } 2322 2323 if (i >= usec_timeout) { 2324 dev_err(adev->dev, "failed to prime instruction cache\n"); 2325 return -EINVAL; 2326 } 2327 2328 mutex_lock(&adev->srbm_mutex); 2329 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2330 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2331 2332 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2333 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 2334 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2335 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 2336 } 2337 soc24_grbm_select(adev, 0, 0, 0, 0); 2338 mutex_unlock(&adev->srbm_mutex); 2339 2340 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2341 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2342 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2343 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2344 2345 /* Invalidate the data caches */ 2346 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2347 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2348 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2349 2350 for (i = 0; i < usec_timeout; i++) { 2351 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2352 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2353 INVALIDATE_DCACHE_COMPLETE)) 2354 break; 2355 udelay(1); 2356 } 2357 2358 if (i >= usec_timeout) { 2359 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2360 return -EINVAL; 2361 } 2362 2363 gfx_v12_0_set_pfp_ucode_start_addr(adev); 2364 2365 return 0; 2366 } 2367 2368 static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 2369 { 2370 int r; 2371 const struct gfx_firmware_header_v2_0 *me_hdr; 2372 const __le32 *fw_ucode, *fw_data; 2373 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2374 uint32_t tmp; 2375 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2376 2377 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2378 adev->gfx.me_fw->data; 2379 2380 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2381 2382 /* instruction */ 2383 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 2384 le32_to_cpu(me_hdr->ucode_offset_bytes)); 2385 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 2386 /* data */ 2387 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 2388 le32_to_cpu(me_hdr->data_offset_bytes)); 2389 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 2390 2391 /* 64kb align*/ 2392 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2393 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2394 &adev->gfx.me.me_fw_obj, 2395 &adev->gfx.me.me_fw_gpu_addr, 2396 (void **)&adev->gfx.me.me_fw_ptr); 2397 if (r) { 2398 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 2399 gfx_v12_0_me_fini(adev); 2400 return r; 2401 } 2402 2403 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2404 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2405 &adev->gfx.me.me_fw_data_obj, 2406 &adev->gfx.me.me_fw_data_gpu_addr, 2407 (void **)&adev->gfx.me.me_fw_data_ptr); 2408 if (r) { 2409 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 2410 gfx_v12_0_pfp_fini(adev); 2411 return r; 2412 } 2413 2414 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 2415 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 2416 2417 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 2418 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 2419 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 2420 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 2421 2422 if (amdgpu_emu_mode == 1) 2423 adev->hdp.funcs->flush_hdp(adev, NULL); 2424 2425 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2426 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2427 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2428 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2429 2430 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2431 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2432 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2433 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2434 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2435 2436 /* 2437 * Programming any of the CP_ME_IC_BASE registers 2438 * forces invalidation of the ME L1 I$. Wait for the 2439 * invalidation complete 2440 */ 2441 for (i = 0; i < usec_timeout; i++) { 2442 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2443 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2444 INVALIDATE_CACHE_COMPLETE)) 2445 break; 2446 udelay(1); 2447 } 2448 2449 if (i >= usec_timeout) { 2450 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2451 return -EINVAL; 2452 } 2453 2454 /* Prime the instruction caches */ 2455 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2456 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2457 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2458 2459 /* Waiting for instruction cache primed*/ 2460 for (i = 0; i < usec_timeout; i++) { 2461 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2462 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2463 ICACHE_PRIMED)) 2464 break; 2465 udelay(1); 2466 } 2467 2468 if (i >= usec_timeout) { 2469 dev_err(adev->dev, "failed to prime instruction cache\n"); 2470 return -EINVAL; 2471 } 2472 2473 mutex_lock(&adev->srbm_mutex); 2474 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2475 soc24_grbm_select(adev, 0, pipe_id, 0, 0); 2476 2477 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2478 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 2479 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2480 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 2481 } 2482 soc24_grbm_select(adev, 0, 0, 0, 0); 2483 mutex_unlock(&adev->srbm_mutex); 2484 2485 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2486 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2487 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2488 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2489 2490 /* Invalidate the data caches */ 2491 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2492 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2493 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2494 2495 for (i = 0; i < usec_timeout; i++) { 2496 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2497 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2498 INVALIDATE_DCACHE_COMPLETE)) 2499 break; 2500 udelay(1); 2501 } 2502 2503 if (i >= usec_timeout) { 2504 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2505 return -EINVAL; 2506 } 2507 2508 gfx_v12_0_set_me_ucode_start_addr(adev); 2509 2510 return 0; 2511 } 2512 2513 static int gfx_v12_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2514 { 2515 int r; 2516 2517 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 2518 return -EINVAL; 2519 2520 gfx_v12_0_cp_gfx_enable(adev, false); 2521 2522 r = gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(adev); 2523 if (r) { 2524 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 2525 return r; 2526 } 2527 2528 r = gfx_v12_0_cp_gfx_load_me_microcode_rs64(adev); 2529 if (r) { 2530 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 2531 return r; 2532 } 2533 2534 return 0; 2535 } 2536 2537 static int gfx_v12_0_cp_gfx_start(struct amdgpu_device *adev) 2538 { 2539 /* init the CP */ 2540 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 2541 adev->gfx.config.max_hw_contexts - 1); 2542 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 2543 2544 if (!amdgpu_async_gfx_ring) 2545 gfx_v12_0_cp_gfx_enable(adev, true); 2546 2547 return 0; 2548 } 2549 2550 static void gfx_v12_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 2551 CP_PIPE_ID pipe) 2552 { 2553 u32 tmp; 2554 2555 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 2556 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 2557 2558 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 2559 } 2560 2561 static void gfx_v12_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 2562 struct amdgpu_ring *ring) 2563 { 2564 u32 tmp; 2565 2566 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 2567 if (ring->use_doorbell) { 2568 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2569 DOORBELL_OFFSET, ring->doorbell_index); 2570 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2571 DOORBELL_EN, 1); 2572 } else { 2573 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2574 DOORBELL_EN, 0); 2575 } 2576 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 2577 2578 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 2579 DOORBELL_RANGE_LOWER, ring->doorbell_index); 2580 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 2581 2582 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 2583 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 2584 } 2585 2586 static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev) 2587 { 2588 struct amdgpu_ring *ring; 2589 u32 tmp; 2590 u32 rb_bufsz; 2591 u64 rb_addr, rptr_addr, wptr_gpu_addr; 2592 u32 i; 2593 2594 /* Set the write pointer delay */ 2595 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 2596 2597 /* set the RB to use vmid 0 */ 2598 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 2599 2600 /* Init gfx ring 0 for pipe 0 */ 2601 mutex_lock(&adev->srbm_mutex); 2602 gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 2603 2604 /* Set ring buffer size */ 2605 ring = &adev->gfx.gfx_ring[0]; 2606 rb_bufsz = order_base_2(ring->ring_size / 8); 2607 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 2608 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 2609 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 2610 2611 /* Initialize the ring buffer's write pointers */ 2612 ring->wptr = 0; 2613 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2614 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 2615 2616 /* set the wb address whether it's enabled or not */ 2617 rptr_addr = ring->rptr_gpu_addr; 2618 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 2619 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 2620 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 2621 2622 wptr_gpu_addr = ring->wptr_gpu_addr; 2623 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 2624 lower_32_bits(wptr_gpu_addr)); 2625 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 2626 upper_32_bits(wptr_gpu_addr)); 2627 2628 mdelay(1); 2629 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 2630 2631 rb_addr = ring->gpu_addr >> 8; 2632 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 2633 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 2634 2635 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 2636 2637 gfx_v12_0_cp_gfx_set_doorbell(adev, ring); 2638 mutex_unlock(&adev->srbm_mutex); 2639 2640 /* Switch to pipe 0 */ 2641 mutex_lock(&adev->srbm_mutex); 2642 gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 2643 mutex_unlock(&adev->srbm_mutex); 2644 2645 /* start the ring */ 2646 gfx_v12_0_cp_gfx_start(adev); 2647 2648 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 2649 ring = &adev->gfx.gfx_ring[i]; 2650 ring->sched.ready = true; 2651 } 2652 2653 return 0; 2654 } 2655 2656 static void gfx_v12_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 2657 { 2658 u32 data; 2659 2660 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2661 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 2662 enable ? 0 : 1); 2663 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 2664 enable ? 0 : 1); 2665 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 2666 enable ? 0 : 1); 2667 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 2668 enable ? 0 : 1); 2669 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 2670 enable ? 0 : 1); 2671 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 2672 enable ? 1 : 0); 2673 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 2674 enable ? 1 : 0); 2675 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 2676 enable ? 1 : 0); 2677 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 2678 enable ? 1 : 0); 2679 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 2680 enable ? 0 : 1); 2681 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 2682 2683 adev->gfx.kiq[0].ring.sched.ready = enable; 2684 2685 udelay(50); 2686 } 2687 2688 static int gfx_v12_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 2689 { 2690 const struct gfx_firmware_header_v2_0 *mec_hdr; 2691 const __le32 *fw_ucode, *fw_data; 2692 u32 tmp, fw_ucode_size, fw_data_size; 2693 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 2694 u32 *fw_ucode_ptr, *fw_data_ptr; 2695 int r; 2696 2697 if (!adev->gfx.mec_fw) 2698 return -EINVAL; 2699 2700 gfx_v12_0_cp_compute_enable(adev, false); 2701 2702 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 2703 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2704 2705 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 2706 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 2707 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 2708 2709 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 2710 le32_to_cpu(mec_hdr->data_offset_bytes)); 2711 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 2712 2713 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2714 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2715 &adev->gfx.mec.mec_fw_obj, 2716 &adev->gfx.mec.mec_fw_gpu_addr, 2717 (void **)&fw_ucode_ptr); 2718 if (r) { 2719 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 2720 gfx_v12_0_mec_fini(adev); 2721 return r; 2722 } 2723 2724 r = amdgpu_bo_create_reserved(adev, 2725 ALIGN(fw_data_size, 64 * 1024) * 2726 adev->gfx.mec.num_pipe_per_mec, 2727 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2728 &adev->gfx.mec.mec_fw_data_obj, 2729 &adev->gfx.mec.mec_fw_data_gpu_addr, 2730 (void **)&fw_data_ptr); 2731 if (r) { 2732 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 2733 gfx_v12_0_mec_fini(adev); 2734 return r; 2735 } 2736 2737 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); 2738 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2739 memcpy(fw_data_ptr + i * ALIGN(fw_data_size, 64 * 1024) / 4, fw_data, fw_data_size); 2740 } 2741 2742 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 2743 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); 2744 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 2745 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); 2746 2747 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2748 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2749 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2750 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2751 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2752 2753 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2754 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2755 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2756 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2757 2758 mutex_lock(&adev->srbm_mutex); 2759 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2760 soc24_grbm_select(adev, 1, i, 0, 0); 2761 2762 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, 2763 lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + 2764 i * ALIGN(fw_data_size, 64 * 1024))); 2765 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2766 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + 2767 i * ALIGN(fw_data_size, 64 * 1024))); 2768 2769 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2770 lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2771 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2772 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2773 } 2774 mutex_unlock(&adev->srbm_mutex); 2775 soc24_grbm_select(adev, 0, 0, 0, 0); 2776 2777 /* Trigger an invalidation of the L1 instruction caches */ 2778 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2779 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2780 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2781 2782 /* Wait for invalidation complete */ 2783 for (i = 0; i < usec_timeout; i++) { 2784 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2785 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2786 INVALIDATE_DCACHE_COMPLETE)) 2787 break; 2788 udelay(1); 2789 } 2790 2791 if (i >= usec_timeout) { 2792 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2793 return -EINVAL; 2794 } 2795 2796 /* Trigger an invalidation of the L1 instruction caches */ 2797 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2798 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2799 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2800 2801 /* Wait for invalidation complete */ 2802 for (i = 0; i < usec_timeout; i++) { 2803 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2804 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2805 INVALIDATE_CACHE_COMPLETE)) 2806 break; 2807 udelay(1); 2808 } 2809 2810 if (i >= usec_timeout) { 2811 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2812 return -EINVAL; 2813 } 2814 2815 gfx_v12_0_set_mec_ucode_start_addr(adev); 2816 2817 return 0; 2818 } 2819 2820 static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring) 2821 { 2822 uint32_t tmp; 2823 struct amdgpu_device *adev = ring->adev; 2824 2825 /* tell RLC which is KIQ queue */ 2826 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 2827 tmp &= 0xffffff00; 2828 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 2829 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 2830 tmp |= 0x80; 2831 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 2832 } 2833 2834 static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev) 2835 { 2836 /* set graphics engine doorbell range */ 2837 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 2838 (adev->doorbell_index.gfx_ring0 * 2) << 2); 2839 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 2840 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 2841 2842 /* set compute engine doorbell range */ 2843 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 2844 (adev->doorbell_index.kiq * 2) << 2); 2845 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 2846 (adev->doorbell_index.userqueue_end * 2) << 2); 2847 } 2848 2849 static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 2850 struct amdgpu_mqd_prop *prop) 2851 { 2852 struct v12_gfx_mqd *mqd = m; 2853 uint64_t hqd_gpu_addr, wb_gpu_addr; 2854 uint32_t tmp; 2855 uint32_t rb_bufsz; 2856 2857 /* set up gfx hqd wptr */ 2858 mqd->cp_gfx_hqd_wptr = 0; 2859 mqd->cp_gfx_hqd_wptr_hi = 0; 2860 2861 /* set the pointer to the MQD */ 2862 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 2863 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 2864 2865 /* set up mqd control */ 2866 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); 2867 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 2868 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 2869 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 2870 mqd->cp_gfx_mqd_control = tmp; 2871 2872 /* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ 2873 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); 2874 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 2875 mqd->cp_gfx_hqd_vmid = 0; 2876 2877 /* set up default queue priority level 2878 * 0x0 = low priority, 0x1 = high priority */ 2879 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); 2880 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); 2881 mqd->cp_gfx_hqd_queue_priority = tmp; 2882 2883 /* set up time quantum */ 2884 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); 2885 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 2886 mqd->cp_gfx_hqd_quantum = tmp; 2887 2888 /* set up gfx hqd base. this is similar as CP_RB_BASE */ 2889 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 2890 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 2891 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 2892 2893 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */ 2894 wb_gpu_addr = prop->rptr_gpu_addr; 2895 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 2896 mqd->cp_gfx_hqd_rptr_addr_hi = 2897 upper_32_bits(wb_gpu_addr) & 0xffff; 2898 2899 /* set up rb_wptr_poll addr */ 2900 wb_gpu_addr = prop->wptr_gpu_addr; 2901 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2902 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2903 2904 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ 2905 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 2906 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); 2907 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 2908 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 2909 #ifdef __BIG_ENDIAN 2910 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 2911 #endif 2912 mqd->cp_gfx_hqd_cntl = tmp; 2913 2914 /* set up cp_doorbell_control */ 2915 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 2916 if (prop->use_doorbell) { 2917 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2918 DOORBELL_OFFSET, prop->doorbell_index); 2919 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2920 DOORBELL_EN, 1); 2921 } else 2922 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2923 DOORBELL_EN, 0); 2924 mqd->cp_rb_doorbell_control = tmp; 2925 2926 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2927 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); 2928 2929 /* active the queue */ 2930 mqd->cp_gfx_hqd_active = 1; 2931 2932 return 0; 2933 } 2934 2935 static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) 2936 { 2937 struct amdgpu_device *adev = ring->adev; 2938 struct v12_gfx_mqd *mqd = ring->mqd_ptr; 2939 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 2940 2941 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 2942 memset((void *)mqd, 0, sizeof(*mqd)); 2943 mutex_lock(&adev->srbm_mutex); 2944 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2945 amdgpu_ring_init_mqd(ring); 2946 soc24_grbm_select(adev, 0, 0, 0, 0); 2947 mutex_unlock(&adev->srbm_mutex); 2948 if (adev->gfx.me.mqd_backup[mqd_idx]) 2949 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 2950 } else { 2951 /* restore mqd with the backup copy */ 2952 if (adev->gfx.me.mqd_backup[mqd_idx]) 2953 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 2954 /* reset the ring */ 2955 ring->wptr = 0; 2956 *ring->wptr_cpu_addr = 0; 2957 amdgpu_ring_clear_ring(ring); 2958 } 2959 2960 return 0; 2961 } 2962 2963 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 2964 { 2965 int r, i; 2966 struct amdgpu_ring *ring; 2967 2968 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 2969 ring = &adev->gfx.gfx_ring[i]; 2970 2971 r = amdgpu_bo_reserve(ring->mqd_obj, false); 2972 if (unlikely(r != 0)) 2973 goto done; 2974 2975 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2976 if (!r) { 2977 r = gfx_v12_0_kgq_init_queue(ring, false); 2978 amdgpu_bo_kunmap(ring->mqd_obj); 2979 ring->mqd_ptr = NULL; 2980 } 2981 amdgpu_bo_unreserve(ring->mqd_obj); 2982 if (r) 2983 goto done; 2984 } 2985 2986 r = amdgpu_gfx_enable_kgq(adev, 0); 2987 if (r) 2988 goto done; 2989 2990 r = gfx_v12_0_cp_gfx_start(adev); 2991 if (r) 2992 goto done; 2993 2994 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 2995 ring = &adev->gfx.gfx_ring[i]; 2996 ring->sched.ready = true; 2997 } 2998 done: 2999 return r; 3000 } 3001 3002 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 3003 struct amdgpu_mqd_prop *prop) 3004 { 3005 struct v12_compute_mqd *mqd = m; 3006 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 3007 uint32_t tmp; 3008 3009 mqd->header = 0xC0310800; 3010 mqd->compute_pipelinestat_enable = 0x00000001; 3011 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 3012 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 3013 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 3014 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 3015 mqd->compute_misc_reserved = 0x00000007; 3016 3017 eop_base_addr = prop->eop_gpu_addr >> 8; 3018 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 3019 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 3020 3021 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3022 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); 3023 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 3024 (order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1)); 3025 3026 mqd->cp_hqd_eop_control = tmp; 3027 3028 /* enable doorbell? */ 3029 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3030 3031 if (prop->use_doorbell) { 3032 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3033 DOORBELL_OFFSET, prop->doorbell_index); 3034 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3035 DOORBELL_EN, 1); 3036 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3037 DOORBELL_SOURCE, 0); 3038 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3039 DOORBELL_HIT, 0); 3040 } else { 3041 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3042 DOORBELL_EN, 0); 3043 } 3044 3045 mqd->cp_hqd_pq_doorbell_control = tmp; 3046 3047 /* disable the queue if it's active */ 3048 mqd->cp_hqd_dequeue_request = 0; 3049 mqd->cp_hqd_pq_rptr = 0; 3050 mqd->cp_hqd_pq_wptr_lo = 0; 3051 mqd->cp_hqd_pq_wptr_hi = 0; 3052 3053 /* set the pointer to the MQD */ 3054 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 3055 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 3056 3057 /* set MQD vmid to 0 */ 3058 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); 3059 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 3060 mqd->cp_mqd_control = tmp; 3061 3062 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3063 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 3064 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 3065 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 3066 3067 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3068 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); 3069 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 3070 (order_base_2(prop->queue_size / 4) - 1)); 3071 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 3072 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 3073 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); 3074 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 3075 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 3076 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 3077 mqd->cp_hqd_pq_control = tmp; 3078 3079 /* set the wb address whether it's enabled or not */ 3080 wb_gpu_addr = prop->rptr_gpu_addr; 3081 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 3082 mqd->cp_hqd_pq_rptr_report_addr_hi = 3083 upper_32_bits(wb_gpu_addr) & 0xffff; 3084 3085 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3086 wb_gpu_addr = prop->wptr_gpu_addr; 3087 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3088 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3089 3090 tmp = 0; 3091 /* enable the doorbell if requested */ 3092 if (prop->use_doorbell) { 3093 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3094 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3095 DOORBELL_OFFSET, prop->doorbell_index); 3096 3097 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3098 DOORBELL_EN, 1); 3099 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3100 DOORBELL_SOURCE, 0); 3101 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3102 DOORBELL_HIT, 0); 3103 } 3104 3105 mqd->cp_hqd_pq_doorbell_control = tmp; 3106 3107 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3108 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); 3109 3110 /* set the vmid for the queue */ 3111 mqd->cp_hqd_vmid = 0; 3112 3113 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); 3114 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 3115 mqd->cp_hqd_persistent_state = tmp; 3116 3117 /* set MIN_IB_AVAIL_SIZE */ 3118 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); 3119 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 3120 mqd->cp_hqd_ib_control = tmp; 3121 3122 /* set static priority for a compute queue/ring */ 3123 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 3124 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 3125 3126 mqd->cp_hqd_active = prop->hqd_active; 3127 3128 return 0; 3129 } 3130 3131 static int gfx_v12_0_kiq_init_register(struct amdgpu_ring *ring) 3132 { 3133 struct amdgpu_device *adev = ring->adev; 3134 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3135 int j; 3136 3137 /* inactivate the queue */ 3138 if (amdgpu_sriov_vf(adev)) 3139 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 3140 3141 /* disable wptr polling */ 3142 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3143 3144 /* write the EOP addr */ 3145 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 3146 mqd->cp_hqd_eop_base_addr_lo); 3147 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 3148 mqd->cp_hqd_eop_base_addr_hi); 3149 3150 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3151 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 3152 mqd->cp_hqd_eop_control); 3153 3154 /* enable doorbell? */ 3155 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3156 mqd->cp_hqd_pq_doorbell_control); 3157 3158 /* disable the queue if it's active */ 3159 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 3160 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 3161 for (j = 0; j < adev->usec_timeout; j++) { 3162 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 3163 break; 3164 udelay(1); 3165 } 3166 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 3167 mqd->cp_hqd_dequeue_request); 3168 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 3169 mqd->cp_hqd_pq_rptr); 3170 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3171 mqd->cp_hqd_pq_wptr_lo); 3172 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3173 mqd->cp_hqd_pq_wptr_hi); 3174 } 3175 3176 /* set the pointer to the MQD */ 3177 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 3178 mqd->cp_mqd_base_addr_lo); 3179 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 3180 mqd->cp_mqd_base_addr_hi); 3181 3182 /* set MQD vmid to 0 */ 3183 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 3184 mqd->cp_mqd_control); 3185 3186 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3187 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 3188 mqd->cp_hqd_pq_base_lo); 3189 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 3190 mqd->cp_hqd_pq_base_hi); 3191 3192 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3193 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 3194 mqd->cp_hqd_pq_control); 3195 3196 /* set the wb address whether it's enabled or not */ 3197 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 3198 mqd->cp_hqd_pq_rptr_report_addr_lo); 3199 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 3200 mqd->cp_hqd_pq_rptr_report_addr_hi); 3201 3202 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3203 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 3204 mqd->cp_hqd_pq_wptr_poll_addr_lo); 3205 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 3206 mqd->cp_hqd_pq_wptr_poll_addr_hi); 3207 3208 /* enable the doorbell if requested */ 3209 if (ring->use_doorbell) { 3210 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3211 (adev->doorbell_index.kiq * 2) << 2); 3212 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3213 (adev->doorbell_index.userqueue_end * 2) << 2); 3214 } 3215 3216 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3217 mqd->cp_hqd_pq_doorbell_control); 3218 3219 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3220 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3221 mqd->cp_hqd_pq_wptr_lo); 3222 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3223 mqd->cp_hqd_pq_wptr_hi); 3224 3225 /* set the vmid for the queue */ 3226 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 3227 3228 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 3229 mqd->cp_hqd_persistent_state); 3230 3231 /* activate the queue */ 3232 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 3233 mqd->cp_hqd_active); 3234 3235 if (ring->use_doorbell) 3236 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 3237 3238 return 0; 3239 } 3240 3241 static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring) 3242 { 3243 struct amdgpu_device *adev = ring->adev; 3244 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3245 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 3246 3247 gfx_v12_0_kiq_setting(ring); 3248 3249 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 3250 /* reset MQD to a clean status */ 3251 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3252 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3253 3254 /* reset ring buffer */ 3255 ring->wptr = 0; 3256 amdgpu_ring_clear_ring(ring); 3257 3258 mutex_lock(&adev->srbm_mutex); 3259 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3260 gfx_v12_0_kiq_init_register(ring); 3261 soc24_grbm_select(adev, 0, 0, 0, 0); 3262 mutex_unlock(&adev->srbm_mutex); 3263 } else { 3264 memset((void *)mqd, 0, sizeof(*mqd)); 3265 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 3266 amdgpu_ring_clear_ring(ring); 3267 mutex_lock(&adev->srbm_mutex); 3268 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3269 amdgpu_ring_init_mqd(ring); 3270 gfx_v12_0_kiq_init_register(ring); 3271 soc24_grbm_select(adev, 0, 0, 0, 0); 3272 mutex_unlock(&adev->srbm_mutex); 3273 3274 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3275 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3276 } 3277 3278 return 0; 3279 } 3280 3281 static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) 3282 { 3283 struct amdgpu_device *adev = ring->adev; 3284 struct v12_compute_mqd *mqd = ring->mqd_ptr; 3285 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 3286 3287 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 3288 memset((void *)mqd, 0, sizeof(*mqd)); 3289 mutex_lock(&adev->srbm_mutex); 3290 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3291 amdgpu_ring_init_mqd(ring); 3292 soc24_grbm_select(adev, 0, 0, 0, 0); 3293 mutex_unlock(&adev->srbm_mutex); 3294 3295 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3296 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3297 } else { 3298 /* restore MQD to a clean status */ 3299 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3300 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3301 /* reset ring buffer */ 3302 ring->wptr = 0; 3303 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 3304 amdgpu_ring_clear_ring(ring); 3305 } 3306 3307 return 0; 3308 } 3309 3310 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev) 3311 { 3312 struct amdgpu_ring *ring; 3313 int r; 3314 3315 ring = &adev->gfx.kiq[0].ring; 3316 3317 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3318 if (unlikely(r != 0)) 3319 return r; 3320 3321 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3322 if (unlikely(r != 0)) { 3323 amdgpu_bo_unreserve(ring->mqd_obj); 3324 return r; 3325 } 3326 3327 gfx_v12_0_kiq_init_queue(ring); 3328 amdgpu_bo_kunmap(ring->mqd_obj); 3329 ring->mqd_ptr = NULL; 3330 amdgpu_bo_unreserve(ring->mqd_obj); 3331 ring->sched.ready = true; 3332 return 0; 3333 } 3334 3335 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) 3336 { 3337 struct amdgpu_ring *ring = NULL; 3338 int r = 0, i; 3339 3340 if (!amdgpu_async_gfx_ring) 3341 gfx_v12_0_cp_compute_enable(adev, true); 3342 3343 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3344 ring = &adev->gfx.compute_ring[i]; 3345 3346 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3347 if (unlikely(r != 0)) 3348 goto done; 3349 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3350 if (!r) { 3351 r = gfx_v12_0_kcq_init_queue(ring, false); 3352 amdgpu_bo_kunmap(ring->mqd_obj); 3353 ring->mqd_ptr = NULL; 3354 } 3355 amdgpu_bo_unreserve(ring->mqd_obj); 3356 if (r) 3357 goto done; 3358 } 3359 3360 r = amdgpu_gfx_enable_kcq(adev, 0); 3361 done: 3362 return r; 3363 } 3364 3365 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev) 3366 { 3367 int r, i; 3368 struct amdgpu_ring *ring; 3369 3370 if (!(adev->flags & AMD_IS_APU)) 3371 gfx_v12_0_enable_gui_idle_interrupt(adev, false); 3372 3373 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 3374 /* legacy firmware loading */ 3375 r = gfx_v12_0_cp_gfx_load_microcode(adev); 3376 if (r) 3377 return r; 3378 3379 r = gfx_v12_0_cp_compute_load_microcode_rs64(adev); 3380 if (r) 3381 return r; 3382 } 3383 3384 gfx_v12_0_cp_set_doorbell_range(adev); 3385 3386 if (amdgpu_async_gfx_ring) { 3387 gfx_v12_0_cp_compute_enable(adev, true); 3388 gfx_v12_0_cp_gfx_enable(adev, true); 3389 } 3390 3391 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 3392 r = amdgpu_mes_kiq_hw_init(adev); 3393 else 3394 r = gfx_v12_0_kiq_resume(adev); 3395 if (r) 3396 return r; 3397 3398 r = gfx_v12_0_kcq_resume(adev); 3399 if (r) 3400 return r; 3401 3402 if (!amdgpu_async_gfx_ring) { 3403 r = gfx_v12_0_cp_gfx_resume(adev); 3404 if (r) 3405 return r; 3406 } else { 3407 r = gfx_v12_0_cp_async_gfx_ring_resume(adev); 3408 if (r) 3409 return r; 3410 } 3411 3412 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3413 ring = &adev->gfx.gfx_ring[i]; 3414 r = amdgpu_ring_test_helper(ring); 3415 if (r) 3416 return r; 3417 } 3418 3419 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3420 ring = &adev->gfx.compute_ring[i]; 3421 r = amdgpu_ring_test_helper(ring); 3422 if (r) 3423 return r; 3424 } 3425 3426 return 0; 3427 } 3428 3429 static void gfx_v12_0_cp_enable(struct amdgpu_device *adev, bool enable) 3430 { 3431 gfx_v12_0_cp_gfx_enable(adev, enable); 3432 gfx_v12_0_cp_compute_enable(adev, enable); 3433 } 3434 3435 static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev) 3436 { 3437 int r; 3438 bool value; 3439 3440 r = adev->gfxhub.funcs->gart_enable(adev); 3441 if (r) 3442 return r; 3443 3444 adev->hdp.funcs->flush_hdp(adev, NULL); 3445 3446 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 3447 false : true; 3448 3449 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 3450 /* TODO investigate why this and the hdp flush above is needed, 3451 * are we missing a flush somewhere else? */ 3452 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); 3453 3454 return 0; 3455 } 3456 3457 static int get_gb_addr_config(struct amdgpu_device *adev) 3458 { 3459 u32 gb_addr_config; 3460 3461 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 3462 if (gb_addr_config == 0) 3463 return -EINVAL; 3464 3465 adev->gfx.config.gb_addr_config_fields.num_pkrs = 3466 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 3467 3468 adev->gfx.config.gb_addr_config = gb_addr_config; 3469 3470 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 3471 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3472 GB_ADDR_CONFIG, NUM_PIPES); 3473 3474 adev->gfx.config.max_tile_pipes = 3475 adev->gfx.config.gb_addr_config_fields.num_pipes; 3476 3477 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 3478 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3479 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 3480 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 3481 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3482 GB_ADDR_CONFIG, NUM_RB_PER_SE); 3483 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 3484 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3485 GB_ADDR_CONFIG, NUM_SHADER_ENGINES); 3486 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 3487 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 3488 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 3489 3490 return 0; 3491 } 3492 3493 static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev) 3494 { 3495 uint32_t data; 3496 3497 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 3498 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 3499 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 3500 3501 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 3502 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 3503 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 3504 } 3505 3506 static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) 3507 { 3508 if (amdgpu_sriov_vf(adev)) 3509 return; 3510 3511 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3512 case IP_VERSION(12, 0, 0): 3513 case IP_VERSION(12, 0, 1): 3514 soc15_program_register_sequence(adev, 3515 golden_settings_gc_12_0, 3516 (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); 3517 3518 if (adev->rev_id == 0) 3519 soc15_program_register_sequence(adev, 3520 golden_settings_gc_12_0_rev0, 3521 (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0)); 3522 break; 3523 default: 3524 break; 3525 } 3526 } 3527 3528 static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block) 3529 { 3530 int r; 3531 struct amdgpu_device *adev = ip_block->adev; 3532 3533 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 3534 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 3535 /* RLC autoload sequence 1: Program rlc ram */ 3536 if (adev->gfx.imu.funcs->program_rlc_ram) 3537 adev->gfx.imu.funcs->program_rlc_ram(adev); 3538 } 3539 /* rlc autoload firmware */ 3540 r = gfx_v12_0_rlc_backdoor_autoload_enable(adev); 3541 if (r) 3542 return r; 3543 } else { 3544 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 3545 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 3546 if (adev->gfx.imu.funcs->load_microcode) 3547 adev->gfx.imu.funcs->load_microcode(adev); 3548 if (adev->gfx.imu.funcs->setup_imu) 3549 adev->gfx.imu.funcs->setup_imu(adev); 3550 if (adev->gfx.imu.funcs->start_imu) 3551 adev->gfx.imu.funcs->start_imu(adev); 3552 } 3553 3554 /* disable gpa mode in backdoor loading */ 3555 gfx_v12_0_disable_gpa_mode(adev); 3556 } 3557 } 3558 3559 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 3560 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 3561 r = gfx_v12_0_wait_for_rlc_autoload_complete(adev); 3562 if (r) { 3563 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r); 3564 return r; 3565 } 3566 } 3567 3568 if (!amdgpu_emu_mode) 3569 gfx_v12_0_init_golden_registers(adev); 3570 3571 adev->gfx.is_poweron = true; 3572 3573 if (get_gb_addr_config(adev)) 3574 DRM_WARN("Invalid gb_addr_config !\n"); 3575 3576 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 3577 gfx_v12_0_config_gfx_rs64(adev); 3578 3579 r = gfx_v12_0_gfxhub_enable(adev); 3580 if (r) 3581 return r; 3582 3583 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT || 3584 adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) && 3585 (amdgpu_dpm == 1)) { 3586 /** 3587 * For gfx 12, rlc firmware loading relies on smu firmware is 3588 * loaded firstly, so in direct type, it has to load smc ucode 3589 * here before rlc. 3590 */ 3591 r = amdgpu_pm_load_smu_firmware(adev, NULL); 3592 if (r) 3593 return r; 3594 } 3595 3596 gfx_v12_0_constants_init(adev); 3597 3598 if (adev->nbio.funcs->gc_doorbell_init) 3599 adev->nbio.funcs->gc_doorbell_init(adev); 3600 3601 r = gfx_v12_0_rlc_resume(adev); 3602 if (r) 3603 return r; 3604 3605 /* 3606 * init golden registers and rlc resume may override some registers, 3607 * reconfig them here 3608 */ 3609 gfx_v12_0_tcp_harvest(adev); 3610 3611 r = gfx_v12_0_cp_resume(adev); 3612 if (r) 3613 return r; 3614 3615 return r; 3616 } 3617 3618 static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) 3619 { 3620 struct amdgpu_device *adev = ip_block->adev; 3621 uint32_t tmp; 3622 3623 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 3624 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 3625 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); 3626 3627 if (!adev->no_hw_access) { 3628 if (amdgpu_async_gfx_ring) { 3629 if (amdgpu_gfx_disable_kgq(adev, 0)) 3630 DRM_ERROR("KGQ disable failed\n"); 3631 } 3632 3633 if (amdgpu_gfx_disable_kcq(adev, 0)) 3634 DRM_ERROR("KCQ disable failed\n"); 3635 3636 amdgpu_mes_kiq_hw_fini(adev); 3637 } 3638 3639 if (amdgpu_sriov_vf(adev)) { 3640 gfx_v12_0_cp_gfx_enable(adev, false); 3641 /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ 3642 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 3643 tmp &= 0xffffff00; 3644 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3645 3646 return 0; 3647 } 3648 gfx_v12_0_cp_enable(adev, false); 3649 gfx_v12_0_enable_gui_idle_interrupt(adev, false); 3650 3651 adev->gfxhub.funcs->gart_disable(adev); 3652 3653 adev->gfx.is_poweron = false; 3654 3655 return 0; 3656 } 3657 3658 static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block) 3659 { 3660 return gfx_v12_0_hw_fini(ip_block); 3661 } 3662 3663 static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block) 3664 { 3665 return gfx_v12_0_hw_init(ip_block); 3666 } 3667 3668 static bool gfx_v12_0_is_idle(void *handle) 3669 { 3670 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3671 3672 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 3673 GRBM_STATUS, GUI_ACTIVE)) 3674 return false; 3675 else 3676 return true; 3677 } 3678 3679 static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block) 3680 { 3681 unsigned i; 3682 u32 tmp; 3683 struct amdgpu_device *adev = ip_block->adev; 3684 3685 for (i = 0; i < adev->usec_timeout; i++) { 3686 /* read MC_STATUS */ 3687 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 3688 GRBM_STATUS__GUI_ACTIVE_MASK; 3689 3690 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 3691 return 0; 3692 udelay(1); 3693 } 3694 return -ETIMEDOUT; 3695 } 3696 3697 static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3698 { 3699 uint64_t clock = 0; 3700 3701 if (adev->smuio.funcs && 3702 adev->smuio.funcs->get_gpu_clock_counter) 3703 clock = adev->smuio.funcs->get_gpu_clock_counter(adev); 3704 else 3705 dev_warn(adev->dev, "query gpu clock counter is not supported\n"); 3706 3707 return clock; 3708 } 3709 3710 static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block) 3711 { 3712 struct amdgpu_device *adev = ip_block->adev; 3713 3714 adev->gfx.funcs = &gfx_v12_0_gfx_funcs; 3715 3716 adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS; 3717 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 3718 AMDGPU_MAX_COMPUTE_RINGS); 3719 3720 gfx_v12_0_set_kiq_pm4_funcs(adev); 3721 gfx_v12_0_set_ring_funcs(adev); 3722 gfx_v12_0_set_irq_funcs(adev); 3723 gfx_v12_0_set_rlc_funcs(adev); 3724 gfx_v12_0_set_mqd_funcs(adev); 3725 gfx_v12_0_set_imu_funcs(adev); 3726 3727 gfx_v12_0_init_rlcg_reg_access_ctrl(adev); 3728 3729 return gfx_v12_0_init_microcode(adev); 3730 } 3731 3732 static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block) 3733 { 3734 struct amdgpu_device *adev = ip_block->adev; 3735 int r; 3736 3737 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 3738 if (r) 3739 return r; 3740 3741 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 3742 if (r) 3743 return r; 3744 3745 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 3746 if (r) 3747 return r; 3748 3749 return 0; 3750 } 3751 3752 static bool gfx_v12_0_is_rlc_enabled(struct amdgpu_device *adev) 3753 { 3754 uint32_t rlc_cntl; 3755 3756 /* if RLC is not enabled, do nothing */ 3757 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 3758 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 3759 } 3760 3761 static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, 3762 int xcc_id) 3763 { 3764 uint32_t data; 3765 unsigned i; 3766 3767 data = RLC_SAFE_MODE__CMD_MASK; 3768 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 3769 3770 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 3771 3772 /* wait for RLC_SAFE_MODE */ 3773 for (i = 0; i < adev->usec_timeout; i++) { 3774 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 3775 RLC_SAFE_MODE, CMD)) 3776 break; 3777 udelay(1); 3778 } 3779 } 3780 3781 static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, 3782 int xcc_id) 3783 { 3784 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 3785 } 3786 3787 static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev, 3788 bool enable) 3789 { 3790 uint32_t def, data; 3791 3792 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 3793 return; 3794 3795 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 3796 3797 if (enable) 3798 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 3799 else 3800 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 3801 3802 if (def != data) 3803 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 3804 } 3805 3806 static void gfx_v12_0_update_spm_vmid(struct amdgpu_device *adev, 3807 struct amdgpu_ring *ring, 3808 unsigned vmid) 3809 { 3810 u32 reg, data; 3811 3812 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 3813 if (amdgpu_sriov_is_pp_one_vf(adev)) 3814 data = RREG32_NO_KIQ(reg); 3815 else 3816 data = RREG32(reg); 3817 3818 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 3819 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 3820 3821 if (amdgpu_sriov_is_pp_one_vf(adev)) 3822 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 3823 else 3824 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 3825 3826 if (ring 3827 && amdgpu_sriov_is_pp_one_vf(adev) 3828 && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) 3829 || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { 3830 uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 3831 amdgpu_ring_emit_wreg(ring, reg, data); 3832 } 3833 } 3834 3835 static const struct amdgpu_rlc_funcs gfx_v12_0_rlc_funcs = { 3836 .is_rlc_enabled = gfx_v12_0_is_rlc_enabled, 3837 .set_safe_mode = gfx_v12_0_set_safe_mode, 3838 .unset_safe_mode = gfx_v12_0_unset_safe_mode, 3839 .init = gfx_v12_0_rlc_init, 3840 .get_csb_size = gfx_v12_0_get_csb_size, 3841 .get_csb_buffer = gfx_v12_0_get_csb_buffer, 3842 .resume = gfx_v12_0_rlc_resume, 3843 .stop = gfx_v12_0_rlc_stop, 3844 .reset = gfx_v12_0_rlc_reset, 3845 .start = gfx_v12_0_rlc_start, 3846 .update_spm_vmid = gfx_v12_0_update_spm_vmid, 3847 }; 3848 3849 #if 0 3850 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable) 3851 { 3852 /* TODO */ 3853 } 3854 3855 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable) 3856 { 3857 /* TODO */ 3858 } 3859 #endif 3860 3861 static int gfx_v12_0_set_powergating_state(void *handle, 3862 enum amd_powergating_state state) 3863 { 3864 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3865 bool enable = (state == AMD_PG_STATE_GATE); 3866 3867 if (amdgpu_sriov_vf(adev)) 3868 return 0; 3869 3870 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3871 case IP_VERSION(12, 0, 0): 3872 case IP_VERSION(12, 0, 1): 3873 amdgpu_gfx_off_ctrl(adev, enable); 3874 break; 3875 default: 3876 break; 3877 } 3878 3879 return 0; 3880 } 3881 3882 static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 3883 bool enable) 3884 { 3885 uint32_t def, data; 3886 3887 if (!(adev->cg_flags & 3888 (AMD_CG_SUPPORT_GFX_CGCG | 3889 AMD_CG_SUPPORT_GFX_CGLS | 3890 AMD_CG_SUPPORT_GFX_3D_CGCG | 3891 AMD_CG_SUPPORT_GFX_3D_CGLS))) 3892 return; 3893 3894 if (enable) { 3895 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 3896 3897 /* unset CGCG override */ 3898 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 3899 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 3900 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3901 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3902 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 3903 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3904 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 3905 3906 /* update CGCG override bits */ 3907 if (def != data) 3908 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 3909 3910 /* enable cgcg FSM(0x0000363F) */ 3911 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 3912 3913 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 3914 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 3915 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3916 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3917 } 3918 3919 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 3920 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 3921 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3922 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3923 } 3924 3925 if (def != data) 3926 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 3927 3928 /* Program RLC_CGCG_CGLS_CTRL_3D */ 3929 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 3930 3931 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 3932 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 3933 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3934 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3935 } 3936 3937 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 3938 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 3939 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3940 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3941 } 3942 3943 if (def != data) 3944 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 3945 3946 /* set IDLE_POLL_COUNT(0x00900100) */ 3947 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 3948 3949 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 3950 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3951 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3952 3953 if (def != data) 3954 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 3955 3956 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 3957 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 3958 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 3959 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 3960 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 3961 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 3962 3963 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 3964 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 3965 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 3966 3967 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 3968 if (adev->sdma.num_instances > 1) { 3969 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 3970 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 3971 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 3972 } 3973 } else { 3974 /* Program RLC_CGCG_CGLS_CTRL */ 3975 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 3976 3977 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 3978 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3979 3980 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3981 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3982 3983 if (def != data) 3984 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 3985 3986 /* Program RLC_CGCG_CGLS_CTRL_3D */ 3987 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 3988 3989 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 3990 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3991 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3992 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3993 3994 if (def != data) 3995 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 3996 3997 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 3998 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 3999 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 4000 4001 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 4002 if (adev->sdma.num_instances > 1) { 4003 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 4004 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 4005 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 4006 } 4007 } 4008 } 4009 4010 static void gfx_v12_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 4011 bool enable) 4012 { 4013 uint32_t data, def; 4014 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 4015 return; 4016 4017 /* It is disabled by HW by default */ 4018 if (enable) { 4019 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 4020 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 4021 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4022 4023 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 4024 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 4025 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 4026 4027 if (def != data) 4028 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4029 } 4030 } else { 4031 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 4032 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4033 4034 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 4035 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 4036 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 4037 4038 if (def != data) 4039 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4040 } 4041 } 4042 } 4043 4044 static void gfx_v12_0_update_repeater_fgcg(struct amdgpu_device *adev, 4045 bool enable) 4046 { 4047 uint32_t def, data; 4048 4049 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 4050 return; 4051 4052 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4053 4054 if (enable) 4055 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK | 4056 RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK); 4057 else 4058 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK | 4059 RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK; 4060 4061 if (def != data) 4062 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4063 } 4064 4065 static void gfx_v12_0_update_sram_fgcg(struct amdgpu_device *adev, 4066 bool enable) 4067 { 4068 uint32_t def, data; 4069 4070 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 4071 return; 4072 4073 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4074 4075 if (enable) 4076 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 4077 else 4078 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 4079 4080 if (def != data) 4081 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4082 } 4083 4084 static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev, 4085 bool enable) 4086 { 4087 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 4088 4089 gfx_v12_0_update_coarse_grain_clock_gating(adev, enable); 4090 4091 gfx_v12_0_update_medium_grain_clock_gating(adev, enable); 4092 4093 gfx_v12_0_update_repeater_fgcg(adev, enable); 4094 4095 gfx_v12_0_update_sram_fgcg(adev, enable); 4096 4097 gfx_v12_0_update_perf_clk(adev, enable); 4098 4099 if (adev->cg_flags & 4100 (AMD_CG_SUPPORT_GFX_MGCG | 4101 AMD_CG_SUPPORT_GFX_CGLS | 4102 AMD_CG_SUPPORT_GFX_CGCG | 4103 AMD_CG_SUPPORT_GFX_3D_CGCG | 4104 AMD_CG_SUPPORT_GFX_3D_CGLS)) 4105 gfx_v12_0_enable_gui_idle_interrupt(adev, enable); 4106 4107 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 4108 4109 return 0; 4110 } 4111 4112 static int gfx_v12_0_set_clockgating_state(void *handle, 4113 enum amd_clockgating_state state) 4114 { 4115 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4116 4117 if (amdgpu_sriov_vf(adev)) 4118 return 0; 4119 4120 switch (adev->ip_versions[GC_HWIP][0]) { 4121 case IP_VERSION(12, 0, 0): 4122 case IP_VERSION(12, 0, 1): 4123 gfx_v12_0_update_gfx_clock_gating(adev, 4124 state == AMD_CG_STATE_GATE); 4125 break; 4126 default: 4127 break; 4128 } 4129 4130 return 0; 4131 } 4132 4133 static void gfx_v12_0_get_clockgating_state(void *handle, u64 *flags) 4134 { 4135 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4136 int data; 4137 4138 /* AMD_CG_SUPPORT_GFX_MGCG */ 4139 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4140 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 4141 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 4142 4143 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 4144 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 4145 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 4146 4147 /* AMD_CG_SUPPORT_GFX_FGCG */ 4148 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 4149 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 4150 4151 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 4152 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 4153 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 4154 4155 /* AMD_CG_SUPPORT_GFX_CGCG */ 4156 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 4157 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 4158 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 4159 4160 /* AMD_CG_SUPPORT_GFX_CGLS */ 4161 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 4162 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 4163 4164 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 4165 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 4166 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 4167 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 4168 4169 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 4170 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 4171 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 4172 } 4173 4174 static u64 gfx_v12_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 4175 { 4176 /* gfx12 is 32bit rptr*/ 4177 return *(uint32_t *)ring->rptr_cpu_addr; 4178 } 4179 4180 static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 4181 { 4182 struct amdgpu_device *adev = ring->adev; 4183 u64 wptr; 4184 4185 /* XXX check if swapping is necessary on BE */ 4186 if (ring->use_doorbell) { 4187 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 4188 } else { 4189 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 4190 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 4191 } 4192 4193 return wptr; 4194 } 4195 4196 static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 4197 { 4198 struct amdgpu_device *adev = ring->adev; 4199 uint32_t *wptr_saved; 4200 uint32_t *is_queue_unmap; 4201 uint64_t aggregated_db_index; 4202 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; 4203 uint64_t wptr_tmp; 4204 4205 if (ring->is_mes_queue) { 4206 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 4207 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 4208 sizeof(uint32_t)); 4209 aggregated_db_index = 4210 amdgpu_mes_get_aggregated_doorbell_index(adev, 4211 ring->hw_prio); 4212 4213 wptr_tmp = ring->wptr & ring->buf_mask; 4214 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 4215 *wptr_saved = wptr_tmp; 4216 /* assume doorbell always being used by mes mapped queue */ 4217 if (*is_queue_unmap) { 4218 WDOORBELL64(aggregated_db_index, wptr_tmp); 4219 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4220 } else { 4221 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4222 4223 if (*is_queue_unmap) 4224 WDOORBELL64(aggregated_db_index, wptr_tmp); 4225 } 4226 } else { 4227 if (ring->use_doorbell) { 4228 /* XXX check if swapping is necessary on BE */ 4229 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 4230 ring->wptr); 4231 WDOORBELL64(ring->doorbell_index, ring->wptr); 4232 } else { 4233 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 4234 lower_32_bits(ring->wptr)); 4235 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 4236 upper_32_bits(ring->wptr)); 4237 } 4238 } 4239 } 4240 4241 static u64 gfx_v12_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 4242 { 4243 /* gfx12 hardware is 32bit rptr */ 4244 return *(uint32_t *)ring->rptr_cpu_addr; 4245 } 4246 4247 static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 4248 { 4249 u64 wptr; 4250 4251 /* XXX check if swapping is necessary on BE */ 4252 if (ring->use_doorbell) 4253 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 4254 else 4255 BUG(); 4256 return wptr; 4257 } 4258 4259 static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 4260 { 4261 struct amdgpu_device *adev = ring->adev; 4262 uint32_t *wptr_saved; 4263 uint32_t *is_queue_unmap; 4264 uint64_t aggregated_db_index; 4265 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; 4266 uint64_t wptr_tmp; 4267 4268 if (ring->is_mes_queue) { 4269 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 4270 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 4271 sizeof(uint32_t)); 4272 aggregated_db_index = 4273 amdgpu_mes_get_aggregated_doorbell_index(adev, 4274 ring->hw_prio); 4275 4276 wptr_tmp = ring->wptr & ring->buf_mask; 4277 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 4278 *wptr_saved = wptr_tmp; 4279 /* assume doorbell always used by mes mapped queue */ 4280 if (*is_queue_unmap) { 4281 WDOORBELL64(aggregated_db_index, wptr_tmp); 4282 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4283 } else { 4284 WDOORBELL64(ring->doorbell_index, wptr_tmp); 4285 4286 if (*is_queue_unmap) 4287 WDOORBELL64(aggregated_db_index, wptr_tmp); 4288 } 4289 } else { 4290 /* XXX check if swapping is necessary on BE */ 4291 if (ring->use_doorbell) { 4292 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 4293 ring->wptr); 4294 WDOORBELL64(ring->doorbell_index, ring->wptr); 4295 } else { 4296 BUG(); /* only DOORBELL method supported on gfx12 now */ 4297 } 4298 } 4299 } 4300 4301 static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 4302 { 4303 struct amdgpu_device *adev = ring->adev; 4304 u32 ref_and_mask, reg_mem_engine; 4305 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 4306 4307 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 4308 switch (ring->me) { 4309 case 1: 4310 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 4311 break; 4312 case 2: 4313 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 4314 break; 4315 default: 4316 return; 4317 } 4318 reg_mem_engine = 0; 4319 } else { 4320 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 4321 reg_mem_engine = 1; /* pfp */ 4322 } 4323 4324 gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 4325 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 4326 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 4327 ref_and_mask, ref_and_mask, 0x20); 4328 } 4329 4330 static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 4331 struct amdgpu_job *job, 4332 struct amdgpu_ib *ib, 4333 uint32_t flags) 4334 { 4335 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4336 u32 header, control = 0; 4337 4338 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 4339 4340 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 4341 4342 control |= ib->length_dw | (vmid << 24); 4343 4344 if (ring->is_mes_queue) 4345 /* inherit vmid from mqd */ 4346 control |= 0x400000; 4347 4348 amdgpu_ring_write(ring, header); 4349 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4350 amdgpu_ring_write(ring, 4351 #ifdef __BIG_ENDIAN 4352 (2 << 0) | 4353 #endif 4354 lower_32_bits(ib->gpu_addr)); 4355 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4356 amdgpu_ring_write(ring, control); 4357 } 4358 4359 static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 4360 struct amdgpu_job *job, 4361 struct amdgpu_ib *ib, 4362 uint32_t flags) 4363 { 4364 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4365 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 4366 4367 if (ring->is_mes_queue) 4368 /* inherit vmid from mqd */ 4369 control |= 0x40000000; 4370 4371 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 4372 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4373 amdgpu_ring_write(ring, 4374 #ifdef __BIG_ENDIAN 4375 (2 << 0) | 4376 #endif 4377 lower_32_bits(ib->gpu_addr)); 4378 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4379 amdgpu_ring_write(ring, control); 4380 } 4381 4382 static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 4383 u64 seq, unsigned flags) 4384 { 4385 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 4386 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 4387 4388 /* RELEASE_MEM - flush caches, send int */ 4389 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 4390 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 4391 PACKET3_RELEASE_MEM_GCR_GL2_WB | 4392 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 4393 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 4394 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 4395 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 4396 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 4397 4398 /* 4399 * the address should be Qword aligned if 64bit write, Dword 4400 * aligned if only send 32bit data low (discard data high) 4401 */ 4402 if (write64bit) 4403 BUG_ON(addr & 0x7); 4404 else 4405 BUG_ON(addr & 0x3); 4406 amdgpu_ring_write(ring, lower_32_bits(addr)); 4407 amdgpu_ring_write(ring, upper_32_bits(addr)); 4408 amdgpu_ring_write(ring, lower_32_bits(seq)); 4409 amdgpu_ring_write(ring, upper_32_bits(seq)); 4410 amdgpu_ring_write(ring, ring->is_mes_queue ? 4411 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); 4412 } 4413 4414 static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 4415 { 4416 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4417 uint32_t seq = ring->fence_drv.sync_seq; 4418 uint64_t addr = ring->fence_drv.gpu_addr; 4419 4420 gfx_v12_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 4421 upper_32_bits(addr), seq, 0xffffffff, 4); 4422 } 4423 4424 static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 4425 uint16_t pasid, uint32_t flush_type, 4426 bool all_hub, uint8_t dst_sel) 4427 { 4428 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 4429 amdgpu_ring_write(ring, 4430 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 4431 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 4432 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 4433 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 4434 } 4435 4436 static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 4437 unsigned vmid, uint64_t pd_addr) 4438 { 4439 if (ring->is_mes_queue) 4440 gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); 4441 else 4442 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 4443 4444 /* compute doesn't have PFP */ 4445 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 4446 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4447 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4448 amdgpu_ring_write(ring, 0x0); 4449 } 4450 } 4451 4452 static void gfx_v12_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 4453 u64 seq, unsigned int flags) 4454 { 4455 struct amdgpu_device *adev = ring->adev; 4456 4457 /* we only allocate 32bit for each seq wb address */ 4458 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 4459 4460 /* write fence seq to the "addr" */ 4461 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4462 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4463 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 4464 amdgpu_ring_write(ring, lower_32_bits(addr)); 4465 amdgpu_ring_write(ring, upper_32_bits(addr)); 4466 amdgpu_ring_write(ring, lower_32_bits(seq)); 4467 4468 if (flags & AMDGPU_FENCE_FLAG_INT) { 4469 /* set register to trigger INT */ 4470 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4471 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4472 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 4473 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 4474 amdgpu_ring_write(ring, 0); 4475 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 4476 } 4477 } 4478 4479 static void gfx_v12_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 4480 uint32_t flags) 4481 { 4482 uint32_t dw2 = 0; 4483 4484 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 4485 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 4486 /* set load_global_config & load_global_uconfig */ 4487 dw2 |= 0x8001; 4488 /* set load_cs_sh_regs */ 4489 dw2 |= 0x01000000; 4490 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 4491 dw2 |= 0x10002; 4492 } 4493 4494 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 4495 amdgpu_ring_write(ring, dw2); 4496 amdgpu_ring_write(ring, 0); 4497 } 4498 4499 static unsigned gfx_v12_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring, 4500 uint64_t addr) 4501 { 4502 unsigned ret; 4503 4504 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 4505 amdgpu_ring_write(ring, lower_32_bits(addr)); 4506 amdgpu_ring_write(ring, upper_32_bits(addr)); 4507 /* discard following DWs if *cond_exec_gpu_addr==0 */ 4508 amdgpu_ring_write(ring, 0); 4509 ret = ring->wptr & ring->buf_mask; 4510 /* patch dummy value later */ 4511 amdgpu_ring_write(ring, 0); 4512 4513 return ret; 4514 } 4515 4516 static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring) 4517 { 4518 int i, r = 0; 4519 struct amdgpu_device *adev = ring->adev; 4520 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 4521 struct amdgpu_ring *kiq_ring = &kiq->ring; 4522 unsigned long flags; 4523 4524 if (adev->enable_mes) 4525 return -EINVAL; 4526 4527 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 4528 return -EINVAL; 4529 4530 spin_lock_irqsave(&kiq->ring_lock, flags); 4531 4532 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 4533 spin_unlock_irqrestore(&kiq->ring_lock, flags); 4534 return -ENOMEM; 4535 } 4536 4537 /* assert preemption condition */ 4538 amdgpu_ring_set_preempt_cond_exec(ring, false); 4539 4540 /* assert IB preemption, emit the trailing fence */ 4541 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 4542 ring->trail_fence_gpu_addr, 4543 ++ring->trail_seq); 4544 amdgpu_ring_commit(kiq_ring); 4545 4546 spin_unlock_irqrestore(&kiq->ring_lock, flags); 4547 4548 /* poll the trailing fence */ 4549 for (i = 0; i < adev->usec_timeout; i++) { 4550 if (ring->trail_seq == 4551 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 4552 break; 4553 udelay(1); 4554 } 4555 4556 if (i >= adev->usec_timeout) { 4557 r = -EINVAL; 4558 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 4559 } 4560 4561 /* deassert preemption condition */ 4562 amdgpu_ring_set_preempt_cond_exec(ring, true); 4563 return r; 4564 } 4565 4566 static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, 4567 bool start, 4568 bool secure) 4569 { 4570 uint32_t v = secure ? FRAME_TMZ : 0; 4571 4572 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 4573 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 4574 } 4575 4576 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 4577 uint32_t reg_val_offs) 4578 { 4579 struct amdgpu_device *adev = ring->adev; 4580 4581 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4582 amdgpu_ring_write(ring, 0 | /* src: register*/ 4583 (5 << 8) | /* dst: memory */ 4584 (1 << 20)); /* write confirm */ 4585 amdgpu_ring_write(ring, reg); 4586 amdgpu_ring_write(ring, 0); 4587 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4588 reg_val_offs * 4)); 4589 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4590 reg_val_offs * 4)); 4591 } 4592 4593 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, 4594 uint32_t reg, 4595 uint32_t val) 4596 { 4597 uint32_t cmd = 0; 4598 4599 switch (ring->funcs->type) { 4600 case AMDGPU_RING_TYPE_GFX: 4601 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 4602 break; 4603 case AMDGPU_RING_TYPE_KIQ: 4604 cmd = (1 << 16); /* no inc addr */ 4605 break; 4606 default: 4607 cmd = WR_CONFIRM; 4608 break; 4609 } 4610 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4611 amdgpu_ring_write(ring, cmd); 4612 amdgpu_ring_write(ring, reg); 4613 amdgpu_ring_write(ring, 0); 4614 amdgpu_ring_write(ring, val); 4615 } 4616 4617 static void gfx_v12_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 4618 uint32_t val, uint32_t mask) 4619 { 4620 gfx_v12_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4621 } 4622 4623 static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 4624 uint32_t reg0, uint32_t reg1, 4625 uint32_t ref, uint32_t mask) 4626 { 4627 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4628 4629 gfx_v12_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 4630 ref, mask, 0x20); 4631 } 4632 4633 static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, 4634 unsigned vmid) 4635 { 4636 struct amdgpu_device *adev = ring->adev; 4637 uint32_t value = 0; 4638 4639 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 4640 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 4641 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 4642 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4643 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 4644 WREG32_SOC15(GC, 0, regSQ_CMD, value); 4645 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 4646 } 4647 4648 static void 4649 gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4650 uint32_t me, uint32_t pipe, 4651 enum amdgpu_interrupt_state state) 4652 { 4653 uint32_t cp_int_cntl, cp_int_cntl_reg; 4654 4655 if (!me) { 4656 switch (pipe) { 4657 case 0: 4658 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 4659 break; 4660 default: 4661 DRM_DEBUG("invalid pipe %d\n", pipe); 4662 return; 4663 } 4664 } else { 4665 DRM_DEBUG("invalid me %d\n", me); 4666 return; 4667 } 4668 4669 switch (state) { 4670 case AMDGPU_IRQ_STATE_DISABLE: 4671 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4672 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4673 TIME_STAMP_INT_ENABLE, 0); 4674 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4675 GENERIC0_INT_ENABLE, 0); 4676 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4677 break; 4678 case AMDGPU_IRQ_STATE_ENABLE: 4679 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4680 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4681 TIME_STAMP_INT_ENABLE, 1); 4682 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4683 GENERIC0_INT_ENABLE, 1); 4684 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4685 break; 4686 default: 4687 break; 4688 } 4689 } 4690 4691 static void gfx_v12_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4692 int me, int pipe, 4693 enum amdgpu_interrupt_state state) 4694 { 4695 u32 mec_int_cntl, mec_int_cntl_reg; 4696 4697 /* 4698 * amdgpu controls only the first MEC. That's why this function only 4699 * handles the setting of interrupts for this specific MEC. All other 4700 * pipes' interrupts are set by amdkfd. 4701 */ 4702 4703 if (me == 1) { 4704 switch (pipe) { 4705 case 0: 4706 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 4707 break; 4708 case 1: 4709 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 4710 break; 4711 default: 4712 DRM_DEBUG("invalid pipe %d\n", pipe); 4713 return; 4714 } 4715 } else { 4716 DRM_DEBUG("invalid me %d\n", me); 4717 return; 4718 } 4719 4720 switch (state) { 4721 case AMDGPU_IRQ_STATE_DISABLE: 4722 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 4723 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4724 TIME_STAMP_INT_ENABLE, 0); 4725 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4726 GENERIC0_INT_ENABLE, 0); 4727 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 4728 break; 4729 case AMDGPU_IRQ_STATE_ENABLE: 4730 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 4731 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4732 TIME_STAMP_INT_ENABLE, 1); 4733 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4734 GENERIC0_INT_ENABLE, 1); 4735 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 4736 break; 4737 default: 4738 break; 4739 } 4740 } 4741 4742 static int gfx_v12_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4743 struct amdgpu_irq_src *src, 4744 unsigned type, 4745 enum amdgpu_interrupt_state state) 4746 { 4747 switch (type) { 4748 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 4749 gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 4750 break; 4751 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 4752 gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 4753 break; 4754 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4755 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4756 break; 4757 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4758 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4759 break; 4760 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4761 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4762 break; 4763 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4764 gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4765 break; 4766 default: 4767 break; 4768 } 4769 return 0; 4770 } 4771 4772 static int gfx_v12_0_eop_irq(struct amdgpu_device *adev, 4773 struct amdgpu_irq_src *source, 4774 struct amdgpu_iv_entry *entry) 4775 { 4776 int i; 4777 u8 me_id, pipe_id, queue_id; 4778 struct amdgpu_ring *ring; 4779 uint32_t mes_queue_id = entry->src_data[0]; 4780 4781 DRM_DEBUG("IH: CP EOP\n"); 4782 4783 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 4784 struct amdgpu_mes_queue *queue; 4785 4786 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 4787 4788 spin_lock(&adev->mes.queue_id_lock); 4789 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 4790 if (queue) { 4791 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 4792 amdgpu_fence_process(queue->ring); 4793 } 4794 spin_unlock(&adev->mes.queue_id_lock); 4795 } else { 4796 me_id = (entry->ring_id & 0x0c) >> 2; 4797 pipe_id = (entry->ring_id & 0x03) >> 0; 4798 queue_id = (entry->ring_id & 0x70) >> 4; 4799 4800 switch (me_id) { 4801 case 0: 4802 if (pipe_id == 0) 4803 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4804 else 4805 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 4806 break; 4807 case 1: 4808 case 2: 4809 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4810 ring = &adev->gfx.compute_ring[i]; 4811 /* Per-queue interrupt is supported for MEC starting from VI. 4812 * The interrupt can only be enabled/disabled per pipe instead 4813 * of per queue. 4814 */ 4815 if ((ring->me == me_id) && 4816 (ring->pipe == pipe_id) && 4817 (ring->queue == queue_id)) 4818 amdgpu_fence_process(ring); 4819 } 4820 break; 4821 } 4822 } 4823 4824 return 0; 4825 } 4826 4827 static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4828 struct amdgpu_irq_src *source, 4829 unsigned int type, 4830 enum amdgpu_interrupt_state state) 4831 { 4832 u32 cp_int_cntl_reg, cp_int_cntl; 4833 int i, j; 4834 4835 switch (state) { 4836 case AMDGPU_IRQ_STATE_DISABLE: 4837 case AMDGPU_IRQ_STATE_ENABLE: 4838 for (i = 0; i < adev->gfx.me.num_me; i++) { 4839 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 4840 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 4841 4842 if (cp_int_cntl_reg) { 4843 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4844 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4845 PRIV_REG_INT_ENABLE, 4846 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4847 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4848 } 4849 } 4850 } 4851 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4852 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4853 /* MECs start at 1 */ 4854 cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j); 4855 4856 if (cp_int_cntl_reg) { 4857 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4858 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4859 PRIV_REG_INT_ENABLE, 4860 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4861 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4862 } 4863 } 4864 } 4865 break; 4866 default: 4867 break; 4868 } 4869 4870 return 0; 4871 } 4872 4873 static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev, 4874 struct amdgpu_irq_src *source, 4875 unsigned type, 4876 enum amdgpu_interrupt_state state) 4877 { 4878 u32 cp_int_cntl_reg, cp_int_cntl; 4879 int i, j; 4880 4881 switch (state) { 4882 case AMDGPU_IRQ_STATE_DISABLE: 4883 case AMDGPU_IRQ_STATE_ENABLE: 4884 for (i = 0; i < adev->gfx.me.num_me; i++) { 4885 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 4886 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 4887 4888 if (cp_int_cntl_reg) { 4889 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4890 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4891 OPCODE_ERROR_INT_ENABLE, 4892 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4893 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4894 } 4895 } 4896 } 4897 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4898 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4899 /* MECs start at 1 */ 4900 cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j); 4901 4902 if (cp_int_cntl_reg) { 4903 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4904 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4905 OPCODE_ERROR_INT_ENABLE, 4906 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4907 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4908 } 4909 } 4910 } 4911 break; 4912 default: 4913 break; 4914 } 4915 return 0; 4916 } 4917 4918 static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4919 struct amdgpu_irq_src *source, 4920 unsigned int type, 4921 enum amdgpu_interrupt_state state) 4922 { 4923 u32 cp_int_cntl_reg, cp_int_cntl; 4924 int i, j; 4925 4926 switch (state) { 4927 case AMDGPU_IRQ_STATE_DISABLE: 4928 case AMDGPU_IRQ_STATE_ENABLE: 4929 for (i = 0; i < adev->gfx.me.num_me; i++) { 4930 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 4931 cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j); 4932 4933 if (cp_int_cntl_reg) { 4934 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4935 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4936 PRIV_INSTR_INT_ENABLE, 4937 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4938 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4939 } 4940 } 4941 } 4942 break; 4943 default: 4944 break; 4945 } 4946 4947 return 0; 4948 } 4949 4950 static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev, 4951 struct amdgpu_iv_entry *entry) 4952 { 4953 u8 me_id, pipe_id, queue_id; 4954 struct amdgpu_ring *ring; 4955 int i; 4956 4957 me_id = (entry->ring_id & 0x0c) >> 2; 4958 pipe_id = (entry->ring_id & 0x03) >> 0; 4959 queue_id = (entry->ring_id & 0x70) >> 4; 4960 4961 switch (me_id) { 4962 case 0: 4963 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4964 ring = &adev->gfx.gfx_ring[i]; 4965 if (ring->me == me_id && ring->pipe == pipe_id && 4966 ring->queue == queue_id) 4967 drm_sched_fault(&ring->sched); 4968 } 4969 break; 4970 case 1: 4971 case 2: 4972 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4973 ring = &adev->gfx.compute_ring[i]; 4974 if (ring->me == me_id && ring->pipe == pipe_id && 4975 ring->queue == queue_id) 4976 drm_sched_fault(&ring->sched); 4977 } 4978 break; 4979 default: 4980 BUG(); 4981 break; 4982 } 4983 } 4984 4985 static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev, 4986 struct amdgpu_irq_src *source, 4987 struct amdgpu_iv_entry *entry) 4988 { 4989 DRM_ERROR("Illegal register access in command stream\n"); 4990 gfx_v12_0_handle_priv_fault(adev, entry); 4991 return 0; 4992 } 4993 4994 static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev, 4995 struct amdgpu_irq_src *source, 4996 struct amdgpu_iv_entry *entry) 4997 { 4998 DRM_ERROR("Illegal opcode in command stream \n"); 4999 gfx_v12_0_handle_priv_fault(adev, entry); 5000 return 0; 5001 } 5002 5003 static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev, 5004 struct amdgpu_irq_src *source, 5005 struct amdgpu_iv_entry *entry) 5006 { 5007 DRM_ERROR("Illegal instruction in command stream\n"); 5008 gfx_v12_0_handle_priv_fault(adev, entry); 5009 return 0; 5010 } 5011 5012 static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) 5013 { 5014 const unsigned int gcr_cntl = 5015 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 5016 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 5017 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 5018 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 5019 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 5020 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 5021 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 5022 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 5023 5024 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 5025 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 5026 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 5027 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 5028 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 5029 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 5030 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 5031 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 5032 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 5033 } 5034 5035 static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) 5036 { 5037 /* Header itself is a NOP packet */ 5038 if (num_nop == 1) { 5039 amdgpu_ring_write(ring, ring->funcs->nop); 5040 return; 5041 } 5042 5043 /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ 5044 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); 5045 5046 /* Header is at index 0, followed by num_nops - 1 NOP packet's */ 5047 amdgpu_ring_insert_nop(ring, num_nop - 1); 5048 } 5049 5050 static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 5051 { 5052 /* Emit the cleaner shader */ 5053 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 5054 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 5055 } 5056 5057 static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) 5058 { 5059 struct amdgpu_device *adev = ip_block->adev; 5060 uint32_t i, j, k, reg, index = 0; 5061 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); 5062 5063 if (!adev->gfx.ip_dump_core) 5064 return; 5065 5066 for (i = 0; i < reg_count; i++) 5067 drm_printf(p, "%-50s \t 0x%08x\n", 5068 gc_reg_list_12_0[i].reg_name, 5069 adev->gfx.ip_dump_core[i]); 5070 5071 /* print compute queue registers for all instances */ 5072 if (!adev->gfx.ip_dump_compute_queues) 5073 return; 5074 5075 reg_count = ARRAY_SIZE(gc_cp_reg_list_12); 5076 drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n", 5077 adev->gfx.mec.num_mec, 5078 adev->gfx.mec.num_pipe_per_mec, 5079 adev->gfx.mec.num_queue_per_pipe); 5080 5081 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 5082 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 5083 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 5084 drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k); 5085 for (reg = 0; reg < reg_count; reg++) { 5086 drm_printf(p, "%-50s \t 0x%08x\n", 5087 gc_cp_reg_list_12[reg].reg_name, 5088 adev->gfx.ip_dump_compute_queues[index + reg]); 5089 } 5090 index += reg_count; 5091 } 5092 } 5093 } 5094 5095 /* print gfx queue registers for all instances */ 5096 if (!adev->gfx.ip_dump_gfx_queues) 5097 return; 5098 5099 index = 0; 5100 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 5101 drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n", 5102 adev->gfx.me.num_me, 5103 adev->gfx.me.num_pipe_per_me, 5104 adev->gfx.me.num_queue_per_pipe); 5105 5106 for (i = 0; i < adev->gfx.me.num_me; i++) { 5107 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 5108 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 5109 drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k); 5110 for (reg = 0; reg < reg_count; reg++) { 5111 drm_printf(p, "%-50s \t 0x%08x\n", 5112 gc_gfx_queue_reg_list_12[reg].reg_name, 5113 adev->gfx.ip_dump_gfx_queues[index + reg]); 5114 } 5115 index += reg_count; 5116 } 5117 } 5118 } 5119 } 5120 5121 static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block) 5122 { 5123 struct amdgpu_device *adev = ip_block->adev; 5124 uint32_t i, j, k, reg, index = 0; 5125 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0); 5126 5127 if (!adev->gfx.ip_dump_core) 5128 return; 5129 5130 amdgpu_gfx_off_ctrl(adev, false); 5131 for (i = 0; i < reg_count; i++) 5132 adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i])); 5133 amdgpu_gfx_off_ctrl(adev, true); 5134 5135 /* dump compute queue registers for all instances */ 5136 if (!adev->gfx.ip_dump_compute_queues) 5137 return; 5138 5139 reg_count = ARRAY_SIZE(gc_cp_reg_list_12); 5140 amdgpu_gfx_off_ctrl(adev, false); 5141 mutex_lock(&adev->srbm_mutex); 5142 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 5143 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 5144 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 5145 /* ME0 is for GFX so start from 1 for CP */ 5146 soc24_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); 5147 for (reg = 0; reg < reg_count; reg++) { 5148 adev->gfx.ip_dump_compute_queues[index + reg] = 5149 RREG32(SOC15_REG_ENTRY_OFFSET( 5150 gc_cp_reg_list_12[reg])); 5151 } 5152 index += reg_count; 5153 } 5154 } 5155 } 5156 soc24_grbm_select(adev, 0, 0, 0, 0); 5157 mutex_unlock(&adev->srbm_mutex); 5158 amdgpu_gfx_off_ctrl(adev, true); 5159 5160 /* dump gfx queue registers for all instances */ 5161 if (!adev->gfx.ip_dump_gfx_queues) 5162 return; 5163 5164 index = 0; 5165 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12); 5166 amdgpu_gfx_off_ctrl(adev, false); 5167 mutex_lock(&adev->srbm_mutex); 5168 for (i = 0; i < adev->gfx.me.num_me; i++) { 5169 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 5170 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 5171 soc24_grbm_select(adev, i, j, k, 0); 5172 5173 for (reg = 0; reg < reg_count; reg++) { 5174 adev->gfx.ip_dump_gfx_queues[index + reg] = 5175 RREG32(SOC15_REG_ENTRY_OFFSET( 5176 gc_gfx_queue_reg_list_12[reg])); 5177 } 5178 index += reg_count; 5179 } 5180 } 5181 } 5182 soc24_grbm_select(adev, 0, 0, 0, 0); 5183 mutex_unlock(&adev->srbm_mutex); 5184 amdgpu_gfx_off_ctrl(adev, true); 5185 } 5186 5187 static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) 5188 { 5189 struct amdgpu_device *adev = ring->adev; 5190 int r; 5191 5192 if (amdgpu_sriov_vf(adev)) 5193 return -EINVAL; 5194 5195 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); 5196 if (r) { 5197 dev_err(adev->dev, "reset via MES failed %d\n", r); 5198 return r; 5199 } 5200 5201 r = amdgpu_bo_reserve(ring->mqd_obj, false); 5202 if (unlikely(r != 0)) { 5203 dev_err(adev->dev, "fail to resv mqd_obj\n"); 5204 return r; 5205 } 5206 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5207 if (!r) { 5208 r = gfx_v12_0_kgq_init_queue(ring, true); 5209 amdgpu_bo_kunmap(ring->mqd_obj); 5210 ring->mqd_ptr = NULL; 5211 } 5212 amdgpu_bo_unreserve(ring->mqd_obj); 5213 if (r) { 5214 DRM_ERROR("fail to unresv mqd_obj\n"); 5215 return r; 5216 } 5217 5218 r = amdgpu_mes_map_legacy_queue(adev, ring); 5219 if (r) { 5220 dev_err(adev->dev, "failed to remap kgq\n"); 5221 return r; 5222 } 5223 5224 return amdgpu_ring_test_ring(ring); 5225 } 5226 5227 static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) 5228 { 5229 struct amdgpu_device *adev = ring->adev; 5230 int r, i; 5231 5232 if (amdgpu_sriov_vf(adev)) 5233 return -EINVAL; 5234 5235 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 5236 mutex_lock(&adev->srbm_mutex); 5237 soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 5238 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 5239 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); 5240 for (i = 0; i < adev->usec_timeout; i++) { 5241 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 5242 break; 5243 udelay(1); 5244 } 5245 soc24_grbm_select(adev, 0, 0, 0, 0); 5246 mutex_unlock(&adev->srbm_mutex); 5247 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5248 5249 r = amdgpu_bo_reserve(ring->mqd_obj, false); 5250 if (unlikely(r != 0)) { 5251 DRM_ERROR("fail to resv mqd_obj\n"); 5252 return r; 5253 } 5254 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5255 if (!r) { 5256 r = gfx_v12_0_kcq_init_queue(ring, true); 5257 amdgpu_bo_kunmap(ring->mqd_obj); 5258 ring->mqd_ptr = NULL; 5259 } 5260 amdgpu_bo_unreserve(ring->mqd_obj); 5261 if (r) { 5262 DRM_ERROR("fail to unresv mqd_obj\n"); 5263 return r; 5264 } 5265 r = amdgpu_mes_map_legacy_queue(adev, ring); 5266 if (r) { 5267 dev_err(adev->dev, "failed to remap kcq\n"); 5268 return r; 5269 } 5270 5271 return amdgpu_ring_test_ring(ring); 5272 } 5273 5274 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = { 5275 .name = "gfx_v12_0", 5276 .early_init = gfx_v12_0_early_init, 5277 .late_init = gfx_v12_0_late_init, 5278 .sw_init = gfx_v12_0_sw_init, 5279 .sw_fini = gfx_v12_0_sw_fini, 5280 .hw_init = gfx_v12_0_hw_init, 5281 .hw_fini = gfx_v12_0_hw_fini, 5282 .suspend = gfx_v12_0_suspend, 5283 .resume = gfx_v12_0_resume, 5284 .is_idle = gfx_v12_0_is_idle, 5285 .wait_for_idle = gfx_v12_0_wait_for_idle, 5286 .set_clockgating_state = gfx_v12_0_set_clockgating_state, 5287 .set_powergating_state = gfx_v12_0_set_powergating_state, 5288 .get_clockgating_state = gfx_v12_0_get_clockgating_state, 5289 .dump_ip_state = gfx_v12_ip_dump, 5290 .print_ip_state = gfx_v12_ip_print, 5291 }; 5292 5293 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 5294 .type = AMDGPU_RING_TYPE_GFX, 5295 .align_mask = 0xff, 5296 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5297 .support_64bit_ptrs = true, 5298 .secure_submission_supported = true, 5299 .get_rptr = gfx_v12_0_ring_get_rptr_gfx, 5300 .get_wptr = gfx_v12_0_ring_get_wptr_gfx, 5301 .set_wptr = gfx_v12_0_ring_set_wptr_gfx, 5302 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 5303 5 + /* COND_EXEC */ 5304 7 + /* PIPELINE_SYNC */ 5305 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5306 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5307 2 + /* VM_FLUSH */ 5308 8 + /* FENCE for VM_FLUSH */ 5309 5 + /* COND_EXEC */ 5310 7 + /* HDP_flush */ 5311 4 + /* VGT_flush */ 5312 31 + /* DE_META */ 5313 3 + /* CNTX_CTRL */ 5314 5 + /* HDP_INVL */ 5315 8 + 8 + /* FENCE x2 */ 5316 8 + /* gfx_v12_0_emit_mem_sync */ 5317 2, /* gfx_v12_0_ring_emit_cleaner_shader */ 5318 .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ 5319 .emit_ib = gfx_v12_0_ring_emit_ib_gfx, 5320 .emit_fence = gfx_v12_0_ring_emit_fence, 5321 .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 5322 .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 5323 .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 5324 .test_ring = gfx_v12_0_ring_test_ring, 5325 .test_ib = gfx_v12_0_ring_test_ib, 5326 .insert_nop = gfx_v12_ring_insert_nop, 5327 .pad_ib = amdgpu_ring_generic_pad_ib, 5328 .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, 5329 .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, 5330 .preempt_ib = gfx_v12_0_ring_preempt_ib, 5331 .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl, 5332 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5333 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5334 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5335 .soft_recovery = gfx_v12_0_ring_soft_recovery, 5336 .emit_mem_sync = gfx_v12_0_emit_mem_sync, 5337 .reset = gfx_v12_0_reset_kgq, 5338 .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, 5339 .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, 5340 .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, 5341 }; 5342 5343 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { 5344 .type = AMDGPU_RING_TYPE_COMPUTE, 5345 .align_mask = 0xff, 5346 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5347 .support_64bit_ptrs = true, 5348 .get_rptr = gfx_v12_0_ring_get_rptr_compute, 5349 .get_wptr = gfx_v12_0_ring_get_wptr_compute, 5350 .set_wptr = gfx_v12_0_ring_set_wptr_compute, 5351 .emit_frame_size = 5352 7 + /* gfx_v12_0_ring_emit_hdp_flush */ 5353 5 + /* hdp invalidate */ 5354 7 + /* gfx_v12_0_ring_emit_pipeline_sync */ 5355 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5356 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5357 2 + /* gfx_v12_0_ring_emit_vm_flush */ 5358 8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */ 5359 8 + /* gfx_v12_0_emit_mem_sync */ 5360 2, /* gfx_v12_0_ring_emit_cleaner_shader */ 5361 .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ 5362 .emit_ib = gfx_v12_0_ring_emit_ib_compute, 5363 .emit_fence = gfx_v12_0_ring_emit_fence, 5364 .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 5365 .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 5366 .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 5367 .test_ring = gfx_v12_0_ring_test_ring, 5368 .test_ib = gfx_v12_0_ring_test_ib, 5369 .insert_nop = gfx_v12_ring_insert_nop, 5370 .pad_ib = amdgpu_ring_generic_pad_ib, 5371 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5372 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5373 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5374 .soft_recovery = gfx_v12_0_ring_soft_recovery, 5375 .emit_mem_sync = gfx_v12_0_emit_mem_sync, 5376 .reset = gfx_v12_0_reset_kcq, 5377 .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, 5378 .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, 5379 .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, 5380 }; 5381 5382 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { 5383 .type = AMDGPU_RING_TYPE_KIQ, 5384 .align_mask = 0xff, 5385 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 5386 .support_64bit_ptrs = true, 5387 .get_rptr = gfx_v12_0_ring_get_rptr_compute, 5388 .get_wptr = gfx_v12_0_ring_get_wptr_compute, 5389 .set_wptr = gfx_v12_0_ring_set_wptr_compute, 5390 .emit_frame_size = 5391 7 + /* gfx_v12_0_ring_emit_hdp_flush */ 5392 5 + /*hdp invalidate */ 5393 7 + /* gfx_v12_0_ring_emit_pipeline_sync */ 5394 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 5395 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 5396 2 + /* gfx_v12_0_ring_emit_vm_flush */ 5397 8 + 8 + 8, /* gfx_v12_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 5398 .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ 5399 .emit_ib = gfx_v12_0_ring_emit_ib_compute, 5400 .emit_fence = gfx_v12_0_ring_emit_fence_kiq, 5401 .test_ring = gfx_v12_0_ring_test_ring, 5402 .test_ib = gfx_v12_0_ring_test_ib, 5403 .insert_nop = amdgpu_ring_insert_nop, 5404 .pad_ib = amdgpu_ring_generic_pad_ib, 5405 .emit_rreg = gfx_v12_0_ring_emit_rreg, 5406 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5407 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5408 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 5409 }; 5410 5411 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev) 5412 { 5413 int i; 5414 5415 adev->gfx.kiq[0].ring.funcs = &gfx_v12_0_ring_funcs_kiq; 5416 5417 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 5418 adev->gfx.gfx_ring[i].funcs = &gfx_v12_0_ring_funcs_gfx; 5419 5420 for (i = 0; i < adev->gfx.num_compute_rings; i++) 5421 adev->gfx.compute_ring[i].funcs = &gfx_v12_0_ring_funcs_compute; 5422 } 5423 5424 static const struct amdgpu_irq_src_funcs gfx_v12_0_eop_irq_funcs = { 5425 .set = gfx_v12_0_set_eop_interrupt_state, 5426 .process = gfx_v12_0_eop_irq, 5427 }; 5428 5429 static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = { 5430 .set = gfx_v12_0_set_priv_reg_fault_state, 5431 .process = gfx_v12_0_priv_reg_irq, 5432 }; 5433 5434 static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = { 5435 .set = gfx_v12_0_set_bad_op_fault_state, 5436 .process = gfx_v12_0_bad_op_irq, 5437 }; 5438 5439 static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = { 5440 .set = gfx_v12_0_set_priv_inst_fault_state, 5441 .process = gfx_v12_0_priv_inst_irq, 5442 }; 5443 5444 static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev) 5445 { 5446 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 5447 adev->gfx.eop_irq.funcs = &gfx_v12_0_eop_irq_funcs; 5448 5449 adev->gfx.priv_reg_irq.num_types = 1; 5450 adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs; 5451 5452 adev->gfx.bad_op_irq.num_types = 1; 5453 adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs; 5454 5455 adev->gfx.priv_inst_irq.num_types = 1; 5456 adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs; 5457 } 5458 5459 static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev) 5460 { 5461 if (adev->flags & AMD_IS_APU) 5462 adev->gfx.imu.mode = MISSION_MODE; 5463 else 5464 adev->gfx.imu.mode = DEBUG_MODE; 5465 5466 adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs; 5467 } 5468 5469 static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev) 5470 { 5471 adev->gfx.rlc.funcs = &gfx_v12_0_rlc_funcs; 5472 } 5473 5474 static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev) 5475 { 5476 /* set gfx eng mqd */ 5477 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 5478 sizeof(struct v12_gfx_mqd); 5479 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 5480 gfx_v12_0_gfx_mqd_init; 5481 /* set compute eng mqd */ 5482 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 5483 sizeof(struct v12_compute_mqd); 5484 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 5485 gfx_v12_0_compute_mqd_init; 5486 } 5487 5488 static void gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 5489 u32 bitmap) 5490 { 5491 u32 data; 5492 5493 if (!bitmap) 5494 return; 5495 5496 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 5497 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 5498 5499 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 5500 } 5501 5502 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 5503 { 5504 u32 data, wgp_bitmask; 5505 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 5506 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 5507 5508 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 5509 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 5510 5511 wgp_bitmask = 5512 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 5513 5514 return (~data) & wgp_bitmask; 5515 } 5516 5517 static u32 gfx_v12_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 5518 { 5519 u32 wgp_idx, wgp_active_bitmap; 5520 u32 cu_bitmap_per_wgp, cu_active_bitmap; 5521 5522 wgp_active_bitmap = gfx_v12_0_get_wgp_active_bitmap_per_sh(adev); 5523 cu_active_bitmap = 0; 5524 5525 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 5526 /* if there is one WGP enabled, it means 2 CUs will be enabled */ 5527 cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 5528 if (wgp_active_bitmap & (1 << wgp_idx)) 5529 cu_active_bitmap |= cu_bitmap_per_wgp; 5530 } 5531 5532 return cu_active_bitmap; 5533 } 5534 5535 static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev, 5536 struct amdgpu_cu_info *cu_info) 5537 { 5538 int i, j, k, counter, active_cu_number = 0; 5539 u32 mask, bitmap; 5540 unsigned disable_masks[8 * 2]; 5541 5542 if (!adev || !cu_info) 5543 return -EINVAL; 5544 5545 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 5546 5547 mutex_lock(&adev->grbm_idx_mutex); 5548 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 5549 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 5550 bitmap = i * adev->gfx.config.max_sh_per_se + j; 5551 if (!((gfx_v12_0_get_sa_active_bitmap(adev) >> bitmap) & 1)) 5552 continue; 5553 mask = 1; 5554 counter = 0; 5555 gfx_v12_0_select_se_sh(adev, i, j, 0xffffffff, 0); 5556 if (i < 8 && j < 2) 5557 gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh( 5558 adev, disable_masks[i * 2 + j]); 5559 bitmap = gfx_v12_0_get_cu_active_bitmap_per_sh(adev); 5560 5561 /** 5562 * GFX12 could support more than 4 SEs, while the bitmap 5563 * in cu_info struct is 4x4 and ioctl interface struct 5564 * drm_amdgpu_info_device should keep stable. 5565 * So we use last two columns of bitmap to store cu mask for 5566 * SEs 4 to 7, the layout of the bitmap is as below: 5567 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 5568 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 5569 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 5570 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 5571 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 5572 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 5573 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 5574 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 5575 */ 5576 cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap; 5577 5578 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 5579 if (bitmap & mask) 5580 counter++; 5581 5582 mask <<= 1; 5583 } 5584 active_cu_number += counter; 5585 } 5586 } 5587 gfx_v12_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 5588 mutex_unlock(&adev->grbm_idx_mutex); 5589 5590 cu_info->number = active_cu_number; 5591 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 5592 5593 return 0; 5594 } 5595 5596 const struct amdgpu_ip_block_version gfx_v12_0_ip_block = { 5597 .type = AMD_IP_BLOCK_TYPE_GFX, 5598 .major = 12, 5599 .minor = 0, 5600 .rev = 0, 5601 .funcs = &gfx_v12_0_ip_funcs, 5602 }; 5603