/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"

#define GFX11_NUM_GFX_RINGS	1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1		0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
#define regPC_CONFIG_CNTL_1		0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin"); 90 MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin"); 91 MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin"); 92 MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin"); 93 MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin"); 94 MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin"); 95 MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin"); 96 MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin"); 97 MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin"); 98 MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin"); 99 MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin"); 100 101 static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = { 102 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), 103 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), 104 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3), 105 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), 106 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), 107 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3), 108 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), 109 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), 110 SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), 111 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), 112 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), 113 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2), 114 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2), 115 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), 116 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), 117 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0), 118 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE), 119 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 120 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR), 121 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE), 122 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR), 123 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR), 124 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE), 125 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR), 126 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR), 127 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 128 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ), 129 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 130 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 131 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 132 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO), 133 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI), 134 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ), 135 SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), 136 SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), 137 SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), 138 SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT), 139 SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT), 140 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS), 141 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2), 142 SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS), 143 SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS), 144 SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), 145 SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES), 146 SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS), 147 SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), 148 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL), 149 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS), 150 SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), 151 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), 152 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL), 153 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR), 154 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR), 155 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR), 156 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR), 157 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR), 158 SOC15_REG_ENTRY_STR(GC, 0, 
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
	/* compute registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
};

static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
	/* gfx queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};

static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	soc15_program_register_sequence(adev,
					golden_settings_gc_11_0,
					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}

static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	int i;

	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nop - 1 NOP packets */
	for (i = 1; i < num_nop; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't have indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

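		/* Each writeback (WB) slot is one dword, so the GPU address
		 * of slot 'index' is the WB base plus index * 4 bytes; the
		 * IB below writes 0xDEADBEEF to this slot and we poll the
		 * CPU mapping of the same dword to confirm execution.
		 */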
		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}

static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[25];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   "amdgpu/%s_pfp.bin", ucode_prefix);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide whether to enable rs64 for gfx11 */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
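		/* RS64 firmware images carry per-pipe stack data in addition
		 * to the instruction image; register one ucode entry for each
		 * PFP pipe's stack so the loader uploads them separately.
		 */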
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   "amdgpu/%s_me.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
		    adev->pdev->revision == 0xCE)
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   "amdgpu/gc_11_0_0_rlc_1.bin");
		else
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);

	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
		err = adev->gfx.imu.funcs->init_microcode(adev);
		if (err)
			DRM_ERROR("Failed to init imu firmware!\n");
		return err;
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
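	/* The scratch, GRBM control/index, and spare interrupt registers
	 * below form the mailbox for RLC-gated (RLCG) register access,
	 * used when direct MMIO writes to GC registers are restricted
	 * (e.g. under SR-IOV).
	 */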
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id,
				     uint32_t simd, uint32_t wave, uint32_t *dst,
				     int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256

static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	struct amdgpu_ring *ring;
	unsigned int irq_type;
	unsigned int hw_prio;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset +
			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
							   fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
							   fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
							   fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
							   fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
								   fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
								   fw_data, fw_size, fw_autoload_mask);
		}
	}
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size,
			fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}

static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4: init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5: disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}

static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
	uint32_t *ptr;
	uint32_t inst;

	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

GFP_KERNEL); 1518 if (!ptr) { 1519 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); 1520 adev->gfx.ip_dump_compute_queues = NULL; 1521 } else { 1522 adev->gfx.ip_dump_compute_queues = ptr; 1523 } 1524 1525 /* Allocate memory for gfx queue registers for all the instances */ 1526 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 1527 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me * 1528 adev->gfx.me.num_queue_per_pipe; 1529 1530 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1531 if (!ptr) { 1532 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); 1533 adev->gfx.ip_dump_gfx_queues = NULL; 1534 } else { 1535 adev->gfx.ip_dump_gfx_queues = ptr; 1536 } 1537 } 1538 1539 static int gfx_v11_0_sw_init(void *handle) 1540 { 1541 int i, j, k, r, ring_id = 0; 1542 int xcc_id = 0; 1543 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1544 1545 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1546 case IP_VERSION(11, 0, 0): 1547 case IP_VERSION(11, 0, 2): 1548 case IP_VERSION(11, 0, 3): 1549 adev->gfx.me.num_me = 1; 1550 adev->gfx.me.num_pipe_per_me = 1; 1551 adev->gfx.me.num_queue_per_pipe = 1; 1552 adev->gfx.mec.num_mec = 2; 1553 adev->gfx.mec.num_pipe_per_mec = 4; 1554 adev->gfx.mec.num_queue_per_pipe = 4; 1555 break; 1556 case IP_VERSION(11, 0, 1): 1557 case IP_VERSION(11, 0, 4): 1558 case IP_VERSION(11, 5, 0): 1559 case IP_VERSION(11, 5, 1): 1560 case IP_VERSION(11, 5, 2): 1561 adev->gfx.me.num_me = 1; 1562 adev->gfx.me.num_pipe_per_me = 1; 1563 adev->gfx.me.num_queue_per_pipe = 1; 1564 adev->gfx.mec.num_mec = 1; 1565 adev->gfx.mec.num_pipe_per_mec = 4; 1566 adev->gfx.mec.num_queue_per_pipe = 4; 1567 break; 1568 default: 1569 adev->gfx.me.num_me = 1; 1570 adev->gfx.me.num_pipe_per_me = 1; 1571 adev->gfx.me.num_queue_per_pipe = 1; 1572 adev->gfx.mec.num_mec = 1; 1573 adev->gfx.mec.num_pipe_per_mec = 4; 1574 adev->gfx.mec.num_queue_per_pipe = 8; 1575 break; 1576 } 1577 1578 /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ 1579 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && 1580 amdgpu_sriov_is_pp_one_vf(adev)) 1581 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG; 1582 1583 /* EOP Event */ 1584 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1585 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1586 &adev->gfx.eop_irq); 1587 if (r) 1588 return r; 1589 1590 /* Bad opcode Event */ 1591 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1592 GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, 1593 &adev->gfx.bad_op_irq); 1594 if (r) 1595 return r; 1596 1597 /* Privileged reg */ 1598 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1599 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1600 &adev->gfx.priv_reg_irq); 1601 if (r) 1602 return r; 1603 1604 /* Privileged inst */ 1605 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1606 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1607 &adev->gfx.priv_inst_irq); 1608 if (r) 1609 return r; 1610 1611 /* FED error */ 1612 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, 1613 GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, 1614 &adev->gfx.rlc_gc_fed_irq); 1615 if (r) 1616 return r; 1617 1618 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1619 1620 gfx_v11_0_me_init(adev); 1621 1622 r = gfx_v11_0_rlc_init(adev); 1623 if (r) { 1624 DRM_ERROR("Failed to init rlc BOs!\n"); 1625 return r; 1626 } 1627 1628 r = gfx_v11_0_mec_init(adev); 1629 if (r) { 1630 DRM_ERROR("Failed to init MEC BOs!\n"); 1631 return r; 1632 } 1633 1634 /* set up the gfx ring */ 1635 for (i = 0; i < 
adev->gfx.me.num_me; i++) { 1636 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 1637 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1638 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1639 continue; 1640 1641 r = gfx_v11_0_gfx_ring_init(adev, ring_id, 1642 i, k, j); 1643 if (r) 1644 return r; 1645 ring_id++; 1646 } 1647 } 1648 } 1649 1650 ring_id = 0; 1651 /* set up the compute queues - allocate horizontally across pipes */ 1652 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1653 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1654 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1655 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, 1656 k, j)) 1657 continue; 1658 1659 r = gfx_v11_0_compute_ring_init(adev, ring_id, 1660 i, k, j); 1661 if (r) 1662 return r; 1663 1664 ring_id++; 1665 } 1666 } 1667 } 1668 1669 if (!adev->enable_mes_kiq) { 1670 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); 1671 if (r) { 1672 DRM_ERROR("Failed to init KIQ BOs!\n"); 1673 return r; 1674 } 1675 1676 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1677 if (r) 1678 return r; 1679 } 1680 1681 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0); 1682 if (r) 1683 return r; 1684 1685 /* allocate visible FB for rlc auto-loading fw */ 1686 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1687 r = gfx_v11_0_rlc_autoload_buffer_init(adev); 1688 if (r) 1689 return r; 1690 } 1691 1692 r = gfx_v11_0_gpu_early_init(adev); 1693 if (r) 1694 return r; 1695 1696 if (amdgpu_gfx_ras_sw_init(adev)) { 1697 dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); 1698 return -EINVAL; 1699 } 1700 1701 gfx_v11_0_alloc_ip_dump(adev); 1702 1703 return 0; 1704 } 1705 1706 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev) 1707 { 1708 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1709 &adev->gfx.pfp.pfp_fw_gpu_addr, 1710 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1711 1712 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1713 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1714 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1715 } 1716 1717 static void gfx_v11_0_me_fini(struct amdgpu_device *adev) 1718 { 1719 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1720 &adev->gfx.me.me_fw_gpu_addr, 1721 (void **)&adev->gfx.me.me_fw_ptr); 1722 1723 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1724 &adev->gfx.me.me_fw_data_gpu_addr, 1725 (void **)&adev->gfx.me.me_fw_data_ptr); 1726 } 1727 1728 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1729 { 1730 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1731 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1732 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1733 } 1734 1735 static int gfx_v11_0_sw_fini(void *handle) 1736 { 1737 int i; 1738 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1739 1740 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1741 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1742 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1743 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1744 1745 amdgpu_gfx_mqd_sw_fini(adev, 0); 1746 1747 if (!adev->enable_mes_kiq) { 1748 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); 1749 amdgpu_gfx_kiq_fini(adev, 0); 1750 } 1751 1752 gfx_v11_0_pfp_fini(adev); 1753 gfx_v11_0_me_fini(adev); 1754 gfx_v11_0_rlc_fini(adev); 1755 gfx_v11_0_mec_fini(adev); 1756 1757 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1758 gfx_v11_0_rlc_autoload_buffer_fini(adev); 1759 1760 gfx_v11_0_free_microcode(adev); 1761 1762 kfree(adev->gfx.ip_dump_core); 1763 
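/* The ip_dump buffers freed here (core, compute queues, gfx queues) are
 * allocated best-effort in gfx_v11_0_alloc_ip_dump(); a failed allocation
 * leaves the corresponding pointer NULL, and kfree(NULL) is a no-op, so
 * unconditional frees are safe.
 */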
kfree(adev->gfx.ip_dump_compute_queues); 1764 kfree(adev->gfx.ip_dump_gfx_queues); 1765 1766 return 0; 1767 } 1768 1769 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1770 u32 sh_num, u32 instance, int xcc_id) 1771 { 1772 u32 data; 1773 1774 if (instance == 0xffffffff) 1775 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1776 INSTANCE_BROADCAST_WRITES, 1); 1777 else 1778 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1779 instance); 1780 1781 if (se_num == 0xffffffff) 1782 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1783 1); 1784 else 1785 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1786 1787 if (sh_num == 0xffffffff) 1788 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1789 1); 1790 else 1791 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1792 1793 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1794 } 1795 1796 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev) 1797 { 1798 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask; 1799 1800 gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE); 1801 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask, 1802 CC_GC_SA_UNIT_DISABLE, 1803 SA_DISABLE); 1804 gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE); 1805 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask, 1806 GC_USER_SA_UNIT_DISABLE, 1807 SA_DISABLE); 1808 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * 1809 adev->gfx.config.max_shader_engines); 1810 1811 return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask)); 1812 } 1813 1814 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1815 { 1816 u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask; 1817 u32 rb_mask; 1818 1819 gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1820 gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask, 1821 CC_RB_BACKEND_DISABLE, 1822 BACKEND_DISABLE); 1823 gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 1824 gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask, 1825 GC_USER_RB_BACKEND_DISABLE, 1826 BACKEND_DISABLE); 1827 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se * 1828 adev->gfx.config.max_shader_engines); 1829 1830 return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask)); 1831 } 1832 1833 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) 1834 { 1835 u32 rb_bitmap_width_per_sa; 1836 u32 max_sa; 1837 u32 active_sa_bitmap; 1838 u32 global_active_rb_bitmap; 1839 u32 active_rb_bitmap = 0; 1840 u32 i; 1841 1842 /* query sa bitmap from SA_UNIT_DISABLE registers */ 1843 active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev); 1844 /* query rb bitmap from RB_BACKEND_DISABLE registers */ 1845 global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev); 1846 1847 /* generate active rb bitmap according to active sa bitmap */ 1848 max_sa = adev->gfx.config.max_shader_engines * 1849 adev->gfx.config.max_sh_per_se; 1850 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se / 1851 adev->gfx.config.max_sh_per_se; 1852 for (i = 0; i < max_sa; i++) { 1853 if (active_sa_bitmap & (1 << i)) 1854 active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa)); 1855 } 1856 1857 active_rb_bitmap &= global_active_rb_bitmap; 1858 adev->gfx.config.backend_enable_mask = active_rb_bitmap; 1859 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); 1860 } 1861 1862 #define DEFAULT_SH_MEM_BASES 
(0x6000) 1863 #define LDS_APP_BASE 0x1 1864 #define SCRATCH_APP_BASE 0x2 1865 1866 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) 1867 { 1868 int i; 1869 uint32_t sh_mem_bases; 1870 uint32_t data; 1871 1872 /* 1873 * Configure apertures: 1874 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1875 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1876 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1877 */ 1878 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | 1879 SCRATCH_APP_BASE; 1880 1881 mutex_lock(&adev->srbm_mutex); 1882 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1883 soc21_grbm_select(adev, 0, 0, 0, i); 1884 /* CP and shaders */ 1885 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1886 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); 1887 1888 /* Enable trap for each kfd vmid. */ 1889 data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); 1890 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 1891 WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); 1892 } 1893 soc21_grbm_select(adev, 0, 0, 0, 0); 1894 mutex_unlock(&adev->srbm_mutex); 1895 1896 /* Initialize all compute VMIDs to have no GDS, GWS, or OA 1897 access. These should be enabled by FW for target VMIDs. */ 1898 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1899 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); 1900 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); 1901 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); 1902 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); 1903 } 1904 } 1905 1906 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev) 1907 { 1908 int vmid; 1909 1910 /* 1911 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 1912 * access. Compute VMIDs should be enabled by FW for target VMIDs, 1913 * the driver can enable them for graphics. VMID0 should maintain 1914 * access so that HWS firmware can save/restore entries. 1915 */ 1916 for (vmid = 1; vmid < 16; vmid++) { 1917 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); 1918 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); 1919 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); 1920 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); 1921 } 1922 } 1923 1924 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev) 1925 { 1926 /* TODO: harvest feature to be added later. */ 1927 } 1928 1929 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev) 1930 { 1931 /* TCCs are global (not instanced). */ 1932 uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) | 1933 RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE); 1934 1935 adev->gfx.config.tcc_disabled_mask = 1936 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | 1937 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16); 1938 } 1939 1940 static void gfx_v11_0_constants_init(struct amdgpu_device *adev) 1941 { 1942 u32 tmp; 1943 int i; 1944 1945 if (!amdgpu_sriov_vf(adev)) 1946 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 1947 1948 gfx_v11_0_setup_rb(adev); 1949 gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info); 1950 gfx_v11_0_get_tcc_info(adev); 1951 adev->gfx.config.pa_sc_tile_steering_override = 0; 1952 1953 /* Set whether texture coordinate truncation is conformant.
*/ 1954 tmp = RREG32_SOC15(GC, 0, regTA_CNTL2); 1955 adev->gfx.config.ta_cntl2_truncate_coord_mode = 1956 REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE); 1957 1958 /* XXX SH_MEM regs */ 1959 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1960 mutex_lock(&adev->srbm_mutex); 1961 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { 1962 soc21_grbm_select(adev, 0, 0, 0, i); 1963 /* CP and shaders */ 1964 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1965 if (i != 0) { 1966 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1967 (adev->gmc.private_aperture_start >> 48)); 1968 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1969 (adev->gmc.shared_aperture_start >> 48)); 1970 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); 1971 } 1972 } 1973 soc21_grbm_select(adev, 0, 0, 0, 0); 1974 1975 mutex_unlock(&adev->srbm_mutex); 1976 1977 gfx_v11_0_init_compute_vmid(adev); 1978 gfx_v11_0_init_gds_vmid(adev); 1979 } 1980 1981 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev, 1982 int me, int pipe) 1983 { 1984 if (me != 0) 1985 return 0; 1986 1987 switch (pipe) { 1988 case 0: 1989 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 1990 case 1: 1991 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 1992 default: 1993 return 0; 1994 } 1995 } 1996 1997 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev, 1998 int me, int pipe) 1999 { 2000 /* 2001 * amdgpu controls only the first MEC. That's why this function only 2002 * handles the setting of interrupts for this specific MEC. All other 2003 * pipes' interrupts are set by amdkfd. 2004 */ 2005 if (me != 1) 2006 return 0; 2007 2008 switch (pipe) { 2009 case 0: 2010 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 2011 case 1: 2012 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 2013 case 2: 2014 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 2015 case 3: 2016 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 2017 default: 2018 return 0; 2019 } 2020 } 2021 2022 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 2023 bool enable) 2024 { 2025 u32 tmp, cp_int_cntl_reg; 2026 int i, j; 2027 2028 if (amdgpu_sriov_vf(adev)) 2029 return; 2030 2031 for (i = 0; i < adev->gfx.me.num_me; i++) { 2032 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 2033 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 2034 2035 if (cp_int_cntl_reg) { 2036 tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 2037 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 2038 enable ? 1 : 0); 2039 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 2040 enable ? 1 : 0); 2041 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 2042 enable ? 1 : 0); 2043 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 2044 enable ? 
1 : 0); 2045 WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); 2046 } 2047 } 2048 } 2049 } 2050 2051 static int gfx_v11_0_init_csb(struct amdgpu_device *adev) 2052 { 2053 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 2054 2055 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 2056 adev->gfx.rlc.clear_state_gpu_addr >> 32); 2057 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 2058 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 2059 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 2060 2061 return 0; 2062 } 2063 2064 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) 2065 { 2066 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 2067 2068 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 2069 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 2070 } 2071 2072 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) 2073 { 2074 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2075 udelay(50); 2076 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2077 udelay(50); 2078 } 2079 2080 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 2081 bool enable) 2082 { 2083 uint32_t rlc_pg_cntl; 2084 2085 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 2086 2087 if (!enable) { 2088 /* RLC_PG_CNTL[23] = 0 (default) 2089 * RLC will wait for handshake acks with SMU 2090 * GFXOFF will be enabled 2091 * RLC_PG_CNTL[23] = 1 2092 * RLC will not issue any message to SMU 2093 * hence no handshake between SMU & RLC 2094 * GFXOFF will be disabled 2095 */ 2096 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2097 } else 2098 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2099 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 2100 } 2101 2102 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) 2103 { 2104 /* TODO: enable rlc & smu handshake until smu 2105 * and gfxoff feature works as expected */ 2106 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 2107 gfx_v11_0_rlc_smu_handshake_cntl(adev, false); 2108 2109 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2110 udelay(50); 2111 } 2112 2113 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) 2114 { 2115 uint32_t tmp; 2116 2117 /* enable Save Restore Machine */ 2118 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 2119 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 2120 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 2121 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 2122 } 2123 2124 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) 2125 { 2126 const struct rlc_firmware_header_v2_0 *hdr; 2127 const __le32 *fw_data; 2128 unsigned i, fw_size; 2129 2130 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2131 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2132 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2133 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2134 2135 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 2136 RLCG_UCODE_LOADING_START_ADDRESS); 2137 2138 for (i = 0; i < fw_size; i++) 2139 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 2140 le32_to_cpup(fw_data++)); 2141 2142 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2143 } 2144 2145 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 2146 { 2147 const struct rlc_firmware_header_v2_2 *hdr; 2148 const __le32 *fw_data; 2149 unsigned i, fw_size; 2150 u32 tmp; 2151 2152 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 2153 2154 fw_data = (const __le32 
*)(adev->gfx.rlc_fw->data + 2155 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 2156 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 2157 2158 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 2159 2160 for (i = 0; i < fw_size; i++) { 2161 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2162 msleep(1); 2163 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 2164 le32_to_cpup(fw_data++)); 2165 } 2166 2167 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2168 2169 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2170 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 2171 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 2172 2173 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 2174 for (i = 0; i < fw_size; i++) { 2175 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2176 msleep(1); 2177 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 2178 le32_to_cpup(fw_data++)); 2179 } 2180 2181 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2182 2183 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 2184 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 2185 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 2186 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 2187 } 2188 2189 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) 2190 { 2191 const struct rlc_firmware_header_v2_3 *hdr; 2192 const __le32 *fw_data; 2193 unsigned i, fw_size; 2194 u32 tmp; 2195 2196 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 2197 2198 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2199 le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); 2200 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; 2201 2202 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); 2203 2204 for (i = 0; i < fw_size; i++) { 2205 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2206 msleep(1); 2207 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, 2208 le32_to_cpup(fw_data++)); 2209 } 2210 2211 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); 2212 2213 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 2214 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 2215 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); 2216 2217 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2218 le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); 2219 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; 2220 2221 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); 2222 2223 for (i = 0; i < fw_size; i++) { 2224 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2225 msleep(1); 2226 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, 2227 le32_to_cpup(fw_data++)); 2228 } 2229 2230 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); 2231 2232 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); 2233 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); 2234 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); 2235 } 2236 2237 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) 2238 { 2239 const struct rlc_firmware_header_v2_0 *hdr; 2240 uint16_t version_major; 2241 uint16_t version_minor; 2242 2243 if (!adev->gfx.rlc_fw) 2244 return -EINVAL; 2245 2246 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2247 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2248 2249 version_major = le16_to_cpu(hdr->header.header_version_major); 2250 version_minor = le16_to_cpu(hdr->header.header_version_minor); 2251 2252 if (version_major == 2) { 2253 gfx_v11_0_load_rlcg_microcode(adev); 2254 if (amdgpu_dpm == 1) { 2255 if (version_minor >= 2) 
2256 gfx_v11_0_load_rlc_iram_dram_microcode(adev); 2257 if (version_minor == 3) 2258 gfx_v11_0_load_rlcp_rlcv_microcode(adev); 2259 } 2260 2261 return 0; 2262 } 2263 2264 return -EINVAL; 2265 } 2266 2267 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev) 2268 { 2269 int r; 2270 2271 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2272 gfx_v11_0_init_csb(adev); 2273 2274 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 2275 gfx_v11_0_rlc_enable_srm(adev); 2276 } else { 2277 if (amdgpu_sriov_vf(adev)) { 2278 gfx_v11_0_init_csb(adev); 2279 return 0; 2280 } 2281 2282 adev->gfx.rlc.funcs->stop(adev); 2283 2284 /* disable CG */ 2285 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 2286 2287 /* disable PG */ 2288 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 2289 2290 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2291 /* legacy rlc firmware loading */ 2292 r = gfx_v11_0_rlc_load_microcode(adev); 2293 if (r) 2294 return r; 2295 } 2296 2297 gfx_v11_0_init_csb(adev); 2298 2299 adev->gfx.rlc.funcs->start(adev); 2300 } 2301 return 0; 2302 } 2303 2304 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) 2305 { 2306 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2307 uint32_t tmp; 2308 int i; 2309 2310 /* Trigger an invalidation of the L1 instruction caches */ 2311 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2312 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2313 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2314 2315 /* Wait for invalidation complete */ 2316 for (i = 0; i < usec_timeout; i++) { 2317 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2318 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2319 INVALIDATE_CACHE_COMPLETE)) 2320 break; 2321 udelay(1); 2322 } 2323 2324 if (i >= usec_timeout) { 2325 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2326 return -EINVAL; 2327 } 2328 2329 if (amdgpu_emu_mode == 1) 2330 adev->hdp.funcs->flush_hdp(adev, NULL); 2331 2332 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2333 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2334 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2335 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2336 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2337 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2338 2339 /* Program me ucode address into instruction cache address register */ 2340 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2341 lower_32_bits(addr) & 0xFFFFF000); 2342 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2343 upper_32_bits(addr)); 2344 2345 return 0; 2346 } 2347 2348 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) 2349 { 2350 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2351 uint32_t tmp; 2352 int i; 2353 2354 /* Trigger an invalidation of the L1 instruction caches */ 2355 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2356 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2357 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2358 2359 /* Wait for invalidation complete */ 2360 for (i = 0; i < usec_timeout; i++) { 2361 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2362 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2363 INVALIDATE_CACHE_COMPLETE)) 2364 break; 2365 udelay(1); 2366 } 2367 2368 if (i >= usec_timeout) { 2369 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2370 return -EINVAL; 2371 } 2372 2373 if (amdgpu_emu_mode == 1) 2374 adev->hdp.funcs->flush_hdp(adev, NULL); 2375 2376 tmp = RREG32_SOC15(GC, 0,
regCP_PFP_IC_BASE_CNTL); 2377 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2378 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2379 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2380 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2381 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2382 2383 /* Program pfp ucode address into instruction cache address register */ 2384 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2385 lower_32_bits(addr) & 0xFFFFF000); 2386 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2387 upper_32_bits(addr)); 2388 2389 return 0; 2390 } 2391 2392 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) 2393 { 2394 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2395 uint32_t tmp; 2396 int i; 2397 2398 /* Trigger an invalidation of the L1 instruction caches */ 2399 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2400 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2401 2402 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2403 2404 /* Wait for invalidation complete */ 2405 for (i = 0; i < usec_timeout; i++) { 2406 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2407 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2408 INVALIDATE_CACHE_COMPLETE)) 2409 break; 2410 udelay(1); 2411 } 2412 2413 if (i >= usec_timeout) { 2414 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2415 return -EINVAL; 2416 } 2417 2418 if (amdgpu_emu_mode == 1) 2419 adev->hdp.funcs->flush_hdp(adev, NULL); 2420 2421 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2422 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2423 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2424 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2425 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2426 2427 /* Program mec1 ucode address into instruction cache address register */ 2428 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2429 lower_32_bits(addr) & 0xFFFFF000); 2430 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2431 upper_32_bits(addr)); 2432 2433 return 0; 2434 } 2435 2436 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2437 { 2438 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2439 uint32_t tmp; 2440 unsigned i, pipe_id; 2441 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2442 2443 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2444 adev->gfx.pfp_fw->data; 2445 2446 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2447 lower_32_bits(addr)); 2448 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2449 upper_32_bits(addr)); 2450 2451 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2452 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2453 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2454 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2455 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2456 2457 /* 2458 * Programming any of the CP_PFP_IC_BASE registers 2459 * forces invalidation of the PFP L1 I$.
Wait for the 2460 * invalidation complete 2461 */ 2462 for (i = 0; i < usec_timeout; i++) { 2463 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2464 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2465 INVALIDATE_CACHE_COMPLETE)) 2466 break; 2467 udelay(1); 2468 } 2469 2470 if (i >= usec_timeout) { 2471 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2472 return -EINVAL; 2473 } 2474 2475 /* Prime the L1 instruction caches */ 2476 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2477 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2478 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2479 /* Waiting for cache primed*/ 2480 for (i = 0; i < usec_timeout; i++) { 2481 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2482 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2483 ICACHE_PRIMED)) 2484 break; 2485 udelay(1); 2486 } 2487 2488 if (i >= usec_timeout) { 2489 dev_err(adev->dev, "failed to prime instruction cache\n"); 2490 return -EINVAL; 2491 } 2492 2493 mutex_lock(&adev->srbm_mutex); 2494 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2495 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2496 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2497 (pfp_hdr->ucode_start_addr_hi << 30) | 2498 (pfp_hdr->ucode_start_addr_lo >> 2)); 2499 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2500 pfp_hdr->ucode_start_addr_hi >> 2); 2501 2502 /* 2503 * Program CP_ME_CNTL to reset given PIPE to take 2504 * effect of CP_PFP_PRGRM_CNTR_START. 2505 */ 2506 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2507 if (pipe_id == 0) 2508 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2509 PFP_PIPE0_RESET, 1); 2510 else 2511 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2512 PFP_PIPE1_RESET, 1); 2513 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2514 2515 /* Clear pfp pipe0 reset bit. 
*/ 2516 if (pipe_id == 0) 2517 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2518 PFP_PIPE0_RESET, 0); 2519 else 2520 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2521 PFP_PIPE1_RESET, 0); 2522 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2523 2524 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2525 lower_32_bits(addr2)); 2526 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2527 upper_32_bits(addr2)); 2528 } 2529 soc21_grbm_select(adev, 0, 0, 0, 0); 2530 mutex_unlock(&adev->srbm_mutex); 2531 2532 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2533 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2534 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2535 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2536 2537 /* Invalidate the data caches */ 2538 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2539 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2540 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2541 2542 for (i = 0; i < usec_timeout; i++) { 2543 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2544 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2545 INVALIDATE_DCACHE_COMPLETE)) 2546 break; 2547 udelay(1); 2548 } 2549 2550 if (i >= usec_timeout) { 2551 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2552 return -EINVAL; 2553 } 2554 2555 return 0; 2556 } 2557 2558 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2559 { 2560 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2561 uint32_t tmp; 2562 unsigned i, pipe_id; 2563 const struct gfx_firmware_header_v2_0 *me_hdr; 2564 2565 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2566 adev->gfx.me_fw->data; 2567 2568 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2569 lower_32_bits(addr)); 2570 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2571 upper_32_bits(addr)); 2572 2573 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2574 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2575 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2576 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2577 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2578 2579 /* 2580 * Programming any of the CP_ME_IC_BASE registers 2581 * forces invalidation of the ME L1 I$. 
Wait for the 2582 * invalidation complete 2583 */ 2584 for (i = 0; i < usec_timeout; i++) { 2585 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2586 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2587 INVALIDATE_CACHE_COMPLETE)) 2588 break; 2589 udelay(1); 2590 } 2591 2592 if (i >= usec_timeout) { 2593 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2594 return -EINVAL; 2595 } 2596 2597 /* Prime the instruction caches */ 2598 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2599 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2600 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2601 2602 /* Waiting for instruction cache primed */ 2603 for (i = 0; i < usec_timeout; i++) { 2604 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2605 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2606 ICACHE_PRIMED)) 2607 break; 2608 udelay(1); 2609 } 2610 2611 if (i >= usec_timeout) { 2612 dev_err(adev->dev, "failed to prime instruction cache\n"); 2613 return -EINVAL; 2614 } 2615 2616 mutex_lock(&adev->srbm_mutex); 2617 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2618 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2619 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2620 (me_hdr->ucode_start_addr_hi << 30) | 2621 (me_hdr->ucode_start_addr_lo >> 2)); 2622 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2623 me_hdr->ucode_start_addr_hi >> 2); 2624 2625 /* 2626 * Program CP_ME_CNTL to reset given PIPE to take 2627 * effect of CP_ME_PRGRM_CNTR_START. 2628 */ 2629 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2630 if (pipe_id == 0) 2631 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2632 ME_PIPE0_RESET, 1); 2633 else 2634 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2635 ME_PIPE1_RESET, 1); 2636 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2637 2638 /* Clear me pipe reset bit.
*/ 2639 if (pipe_id == 0) 2640 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2641 ME_PIPE0_RESET, 0); 2642 else 2643 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2644 ME_PIPE1_RESET, 0); 2645 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2646 2647 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2648 lower_32_bits(addr2)); 2649 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2650 upper_32_bits(addr2)); 2651 } 2652 soc21_grbm_select(adev, 0, 0, 0, 0); 2653 mutex_unlock(&adev->srbm_mutex); 2654 2655 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2656 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2657 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2658 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2659 2660 /* Invalidate the data caches */ 2661 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2662 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2663 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2664 2665 for (i = 0; i < usec_timeout; i++) { 2666 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2667 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2668 INVALIDATE_DCACHE_COMPLETE)) 2669 break; 2670 udelay(1); 2671 } 2672 2673 if (i >= usec_timeout) { 2674 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2675 return -EINVAL; 2676 } 2677 2678 return 0; 2679 } 2680 2681 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2682 { 2683 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2684 uint32_t tmp; 2685 unsigned i; 2686 const struct gfx_firmware_header_v2_0 *mec_hdr; 2687 2688 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2689 adev->gfx.mec_fw->data; 2690 2691 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2692 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2693 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2694 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2695 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2696 2697 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2698 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2699 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2700 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2701 2702 mutex_lock(&adev->srbm_mutex); 2703 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2704 soc21_grbm_select(adev, 1, i, 0, 0); 2705 2706 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); 2707 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2708 upper_32_bits(addr2)); 2709 2710 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2711 mec_hdr->ucode_start_addr_lo >> 2 | 2712 mec_hdr->ucode_start_addr_hi << 30); 2713 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2714 mec_hdr->ucode_start_addr_hi >> 2); 2715 2716 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); 2717 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2718 upper_32_bits(addr)); 2719 } 2720 mutex_unlock(&adev->srbm_mutex); 2721 soc21_grbm_select(adev, 0, 0, 0, 0); 2722 2723 /* Trigger an invalidation of the L1 instruction caches */ 2724 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2725 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2726 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2727 2728 /* Wait for invalidation complete */ 2729 for (i = 0; i < usec_timeout; i++) { 2730 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2731 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2732 INVALIDATE_DCACHE_COMPLETE)) 2733 break; 2734 udelay(1); 2735 } 2736 2737 if (i >= 
usec_timeout) { 2738 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2739 return -EINVAL; 2740 } 2741 2742 /* Trigger an invalidation of the L1 instruction caches */ 2743 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2744 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2745 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2746 2747 /* Wait for invalidation complete */ 2748 for (i = 0; i < usec_timeout; i++) { 2749 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2750 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2751 INVALIDATE_CACHE_COMPLETE)) 2752 break; 2753 udelay(1); 2754 } 2755 2756 if (i >= usec_timeout) { 2757 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2758 return -EINVAL; 2759 } 2760 2761 return 0; 2762 } 2763 2764 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) 2765 { 2766 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2767 const struct gfx_firmware_header_v2_0 *me_hdr; 2768 const struct gfx_firmware_header_v2_0 *mec_hdr; 2769 uint32_t pipe_id, tmp; 2770 2771 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2772 adev->gfx.mec_fw->data; 2773 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2774 adev->gfx.me_fw->data; 2775 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2776 adev->gfx.pfp_fw->data; 2777 2778 /* config pfp program start addr */ 2779 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2780 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2781 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2782 (pfp_hdr->ucode_start_addr_hi << 30) | 2783 (pfp_hdr->ucode_start_addr_lo >> 2)); 2784 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2785 pfp_hdr->ucode_start_addr_hi >> 2); 2786 } 2787 soc21_grbm_select(adev, 0, 0, 0, 0); 2788 2789 /* reset pfp pipe */ 2790 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2791 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2792 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2793 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2794 2795 /* clear pfp pipe reset */ 2796 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2797 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2798 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2799 2800 /* config me program start addr */ 2801 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2802 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2803 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2804 (me_hdr->ucode_start_addr_hi << 30) | 2805 (me_hdr->ucode_start_addr_lo >> 2) ); 2806 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2807 me_hdr->ucode_start_addr_hi>>2); 2808 } 2809 soc21_grbm_select(adev, 0, 0, 0, 0); 2810 2811 /* reset me pipe */ 2812 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2813 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2814 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2815 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2816 2817 /* clear me pipe reset */ 2818 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2819 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2820 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2821 2822 /* config mec program start addr */ 2823 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2824 soc21_grbm_select(adev, 1, pipe_id, 0, 0); 2825 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2826 mec_hdr->ucode_start_addr_lo >> 2 | 2827 mec_hdr->ucode_start_addr_hi << 30); 2828 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2829 mec_hdr->ucode_start_addr_hi >> 2); 2830 } 2831 soc21_grbm_select(adev, 0, 0, 0, 0); 2832 2833 /* reset mec pipe */ 2834 tmp = 
RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2835 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 2836 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 2837 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 2838 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 2839 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2840 2841 /* clear mec pipe reset */ 2842 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 2843 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 2844 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 2845 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 2846 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2847 } 2848 2849 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 2850 { 2851 uint32_t cp_status; 2852 uint32_t bootload_status; 2853 int i, r; 2854 uint64_t addr, addr2; 2855 2856 for (i = 0; i < adev->usec_timeout; i++) { 2857 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2858 2859 if (amdgpu_ip_version(adev, GC_HWIP, 0) == 2860 IP_VERSION(11, 0, 1) || 2861 amdgpu_ip_version(adev, GC_HWIP, 0) == 2862 IP_VERSION(11, 0, 4) || 2863 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) || 2864 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) || 2865 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2)) 2866 bootload_status = RREG32_SOC15(GC, 0, 2867 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 2868 else 2869 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 2870 2871 if ((cp_status == 0) && 2872 (REG_GET_FIELD(bootload_status, 2873 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 2874 break; 2875 } 2876 udelay(1); 2877 } 2878 2879 if (i >= adev->usec_timeout) { 2880 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 2881 return -ETIMEDOUT; 2882 } 2883 2884 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2885 if (adev->gfx.rs64_enable) { 2886 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2887 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; 2888 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2889 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; 2890 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); 2891 if (r) 2892 return r; 2893 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2894 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; 2895 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2896 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; 2897 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); 2898 if (r) 2899 return r; 2900 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2901 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; 2902 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2903 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; 2904 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); 2905 if (r) 2906 return r; 2907 } else { 2908 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2909 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; 2910 r = gfx_v11_0_config_me_cache(adev, addr); 2911 if (r) 2912 return r; 2913 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2914 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; 2915 r = gfx_v11_0_config_pfp_cache(adev, addr); 2916 if (r) 2917 return r; 2918 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2919 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; 2920 r = gfx_v11_0_config_mec_cache(adev, addr); 2921 if (r) 2922 return r; 2923 } 2924 } 2925 2926 return 0; 2927 } 2928 2929 
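/*
 * gfx_v11_0_cp_gfx_enable - halt (enable == false) or un-halt
 * (enable == true) the CP gfx micro engines by toggling the ME_HALT and
 * PFP_HALT fields of CP_ME_CNTL, then poll CP_STAT until the CP reports
 * idle. Judging from the callers in this file (an inferred sequence, not
 * a documented contract), the direct-load path is roughly:
 *
 *   gfx_v11_0_cp_gfx_enable(adev, false);        halt ME/PFP
 *   gfx_v11_0_cp_gfx_load_pfp_microcode(adev);   or the _rs64 variant
 *   gfx_v11_0_cp_gfx_load_me_microcode(adev);    or the _rs64 variant
 *   gfx_v11_0_cp_gfx_start(adev);                write CSB, re-enable CP
 */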
static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2930 { 2931 int i; 2932 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2933 2934 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2935 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2936 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2937 2938 for (i = 0; i < adev->usec_timeout; i++) { 2939 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 2940 break; 2941 udelay(1); 2942 } 2943 2944 if (i >= adev->usec_timeout) 2945 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); 2946 2947 return 0; 2948 } 2949 2950 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 2951 { 2952 int r; 2953 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2954 const __le32 *fw_data; 2955 unsigned i, fw_size; 2956 2957 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2958 adev->gfx.pfp_fw->data; 2959 2960 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2961 2962 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2963 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2964 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 2965 2966 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 2967 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2968 &adev->gfx.pfp.pfp_fw_obj, 2969 &adev->gfx.pfp.pfp_fw_gpu_addr, 2970 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2971 if (r) { 2972 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 2973 gfx_v11_0_pfp_fini(adev); 2974 return r; 2975 } 2976 2977 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 2978 2979 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2980 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2981 2982 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); 2983 2984 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); 2985 2986 for (i = 0; i < pfp_hdr->jt_size; i++) 2987 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, 2988 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); 2989 2990 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2991 2992 return 0; 2993 } 2994 2995 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 2996 { 2997 int r; 2998 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2999 const __le32 *fw_ucode, *fw_data; 3000 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3001 uint32_t tmp; 3002 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3003 3004 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 3005 adev->gfx.pfp_fw->data; 3006 3007 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 3008 3009 /* instruction */ 3010 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 3011 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 3012 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 3013 /* data */ 3014 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 3015 le32_to_cpu(pfp_hdr->data_offset_bytes)); 3016 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 3017 3018 /* 64kb align */ 3019 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3020 64 * 1024, 3021 AMDGPU_GEM_DOMAIN_VRAM | 3022 AMDGPU_GEM_DOMAIN_GTT, 3023 &adev->gfx.pfp.pfp_fw_obj, 3024 &adev->gfx.pfp.pfp_fw_gpu_addr, 3025 (void **)&adev->gfx.pfp.pfp_fw_ptr); 3026 if (r) { 3027 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 3028 gfx_v11_0_pfp_fini(adev); 3029 return r; 3030 } 3031 3032 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3033 64 * 1024, 3034 AMDGPU_GEM_DOMAIN_VRAM | 3035 AMDGPU_GEM_DOMAIN_GTT, 3036 &adev->gfx.pfp.pfp_fw_data_obj, 3037 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 3038 
				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the PFP L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
	/* Wait for the caches to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  ICACHE_PRIMED) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given PIPE so that
		 * CP_PFP_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear the pfp pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);

	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);

	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);

	for (i = 0; i < me_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);

	return 0;
}

static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_data_obj,
				      &adev->gfx.me.me_fw_data_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for the instruction cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  ICACHE_PRIMED) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given PIPE so that
		 * CP_ME_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear the me pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
		return -EINVAL;

	gfx_v11_0_cp_gfx_enable(adev, false);

	if (adev->gfx.rs64_enable)
		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
	else
		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	if (adev->gfx.rs64_enable)
		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
	else
		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;
	int ctx_reg_offset;

	/* init the CP */
	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);

	if (!amdgpu_async_gfx_ring)
		gfx_v11_0_cp_gfx_enable(adev, true);

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section;
		     ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG,
							  ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index -
						  PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	amdgpu_ring_write(ring, ctx_reg_offset);
	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	/* submit cs packet to copy state 0 to next available state */
	if (adev->gfx.num_gfx_rings > 1) {
		/* maximum supported gfx ring is 2 */
		ring = &adev->gfx.gfx_ring[1];
		r = amdgpu_ring_alloc(ring, 2);
		if (r) {
			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
			return r;
		}

		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
		amdgpu_ring_write(ring, 0);

		amdgpu_ring_commit(ring);
	}
	return 0;
}

static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
}

static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);

	/* Init gfx ring 0 for pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0,
		     regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);

	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Init gfx ring 1 for pipe 1 */
	if (adev->gfx.num_gfx_rings > 1) {
		mutex_lock(&adev->srbm_mutex);
		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
		/* maximum supported gfx ring is 2 */
		ring = &adev->gfx.gfx_ring[1];
		rb_bufsz = order_base_2(ring->ring_size / 8);
		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
		/* Initialize the ring buffer's write pointers */
		ring->wptr = 0;
		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
		/* Set the wb address whether it's enabled or not */
		rptr_addr = ring->rptr_gpu_addr;
		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
		wptr_gpu_addr = ring->wptr_gpu_addr;
		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
			     lower_32_bits(wptr_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
			     upper_32_bits(wptr_gpu_addr));

		mdelay(1);
		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);

		rb_addr = ring->gpu_addr >> 8;
		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);

		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
		mutex_unlock(&adev->srbm_mutex);
	}
	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v11_0_cp_gfx_start(adev);

	return 0;
}

static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	u32 data;

	if (adev->gfx.rs64_enable) {
		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
				     enable ?
				     1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
				     enable ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
				     enable ? 0 : 1);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
	} else {
		data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);

		if (enable) {
			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
			if (!adev->enable_mes_kiq)
				data = REG_SET_FIELD(data, CP_MEC_CNTL,
						     MEC_ME2_HALT, 0);
		} else {
			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
		}
		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
	}

	udelay(50);
}

static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 *fw = NULL;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v11_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);

	/* MEC1 */
	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);

	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);

	return 0;
}

static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	const __le32 *fw_ucode, *fw_data;
	u32 tmp, fw_ucode_size, fw_data_size;
	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
	u32 *fw_ucode_ptr, *fw_data_ptr;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v11_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(mec_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);

	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(mec_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
	memcpy(fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	mutex_unlock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* Trigger an invalidation of the L1 data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which queue is the KIQ */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
{
	/* set graphics engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);

	/* set compute engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.kiq * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.userqueue_end * 2) << 2);
}

static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
					   struct v11_gfx_mqd *mqd,
					   struct amdgpu_mqd_prop *prop)
{
	bool priority = false;
	u32 tmp;

	/* set up default queue priority level
	 * 0x0 = low priority, 0x1 = high priority
	 */
	if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
		priority = true;

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
	mqd->cp_gfx_hqd_queue_priority = tmp;
}

static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
				  struct amdgpu_mqd_prop *prop)
{
	struct v11_gfx_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set up mqd control */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = 0;

	/* set up gfx queue priority */
	gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);

	/* set up time quantum */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base. this is similar to CP_RB_BASE */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}

static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.gfx_ring[0];

	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else {
		/* restore mqd with the backup copy */
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		*ring->wptr_cpu_addr = 0;
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			return r;

		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v11_0_kgq_init_queue(ring, false);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			return r;
	}

	r = amdgpu_gfx_enable_kgq(adev, 0);
	if (r)
		return r;

	return gfx_v11_0_cp_gfx_start(adev);
}

static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
				      struct amdgpu_mqd_prop *prop)
{
	struct v11_compute_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = prop->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);

	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(prop->queue_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
			    prop->allow_tunneling);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (prop->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a compute queue/ring */
	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;

	mqd->cp_hqd_active = prop->hqd_active;

	return 0;
}

static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	int j;

	/* inactivate the queue */
	if (amdgpu_sriov_vf(adev))
		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);

	/* disable wptr polling */
	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
		     mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
		     mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
		     mqd->cp_hqd_eop_control);

	/* enable doorbell?
	 */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
			     mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
			     mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
			     mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
			     mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
		     mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
		     mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
		     mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
		     mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
		     mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
		     mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
			     (adev->doorbell_index.kiq * 2) << 2);
		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
			     (adev->doorbell_index.userqueue_end * 2) << 2);
	}

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
		     mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
		     mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
		     mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
		     mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_compute_mqd *mqd = ring->mqd_ptr;

	gfx_v11_0_kiq_setting(ring);

	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.kiq[0].mqd_backup)
			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
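		/*
		 * GC queue registers are banked per me/pipe/queue instance:
		 * soc21_grbm_select() points the register accesses that
		 * follow at one specific instance, and srbm_mutex keeps
		 * other threads from re-targeting the bank mid-sequence.
		 * The selection is restored to instance (0, 0, 0, 0) before
		 * the mutex is dropped, as in the other init paths in this
		 * file.
		 */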
		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v11_0_kiq_init_register(ring);
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		gfx_v11_0_kiq_init_register(ring);
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.kiq[0].mqd_backup)
			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
	}

	return 0;
}

static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else {
		/* restore MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq[0].ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v11_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	if (!amdgpu_async_gfx_ring)
		gfx_v11_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v11_0_kcq_init_queue(ring, false);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev, 0);
done:
	return r;
}

static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v11_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v11_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		if (adev->gfx.rs64_enable)
			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
		else
			r = gfx_v11_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	gfx_v11_0_cp_set_doorbell_range(adev);

	if (amdgpu_async_gfx_ring) {
		gfx_v11_0_cp_compute_enable(adev, true);
		gfx_v11_0_cp_gfx_enable(adev, true);
	}

	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
		r = amdgpu_mes_kiq_hw_init(adev);
	else
		r = gfx_v11_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v11_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v11_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v11_0_cp_gfx_enable(adev, enable);
	gfx_v11_0_cp_compute_enable(adev, enable);
}

static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	/* TODO investigate why this and the hdp flush above are needed,
	 * are we missing a flush somewhere else?
	 */
	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	return 0;
}

static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
{
	u32 tmp;

	/* select RS64 */
	if (adev->gfx.rs64_enable) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
		tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
		WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);

		tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
		tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
		WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
	}

	if (amdgpu_emu_mode == 1)
		msleep(100);
}

static int get_gb_addr_config(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	if (gb_addr_config == 0)
		return -EINVAL;

	adev->gfx.config.gb_addr_config_fields.num_pkrs =
		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));

	return 0;
}

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);

	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}

static int gfx_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		if (adev->gfx.imu.funcs) {
			/* RLC autoload sequence 1: Program rlc ram */
			if (adev->gfx.imu.funcs->program_rlc_ram)
				adev->gfx.imu.funcs->program_rlc_ram(adev);
			/* rlc autoload firmware */
			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
			if (r)
				return r;
		}
	} else {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
				if (adev->gfx.imu.funcs->load_microcode)
					adev->gfx.imu.funcs->load_microcode(adev);
				if (adev->gfx.imu.funcs->setup_imu)
					adev->gfx.imu.funcs->setup_imu(adev);
				if (adev->gfx.imu.funcs->start_imu)
					adev->gfx.imu.funcs->start_imu(adev);
			}

			/* disable gpa mode in backdoor loading */
			gfx_v11_0_disable_gpa_mode(adev);
		}
	}

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
	    (adev->firmware.load_type ==
	     AMDGPU_FW_LOAD_PSP)) {
		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
		if (r) {
			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
			return r;
		}
	}

	adev->gfx.is_poweron = true;

	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
	    adev->gfx.rs64_enable)
		gfx_v11_0_config_gfx_rs64(adev);

	r = gfx_v11_0_gfxhub_enable(adev);
	if (r)
		return r;

	if (!amdgpu_emu_mode)
		gfx_v11_0_init_golden_registers(adev);

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		/*
		 * For gfx 11, RLC firmware loading relies on the SMU firmware
		 * having been loaded first, so for the direct loading type the
		 * SMC microcode has to be loaded here before the RLC.
		 */
		r = amdgpu_pm_load_smu_firmware(adev, NULL);
		if (r)
			return r;
	}

	gfx_v11_0_constants_init(adev);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		gfx_v11_0_select_cp_fw_arch(adev);

	if (adev->nbio.funcs->gc_doorbell_init)
		adev->nbio.funcs->gc_doorbell_init(adev);

	r = gfx_v11_0_rlc_resume(adev);
	if (r)
		return r;

	/*
	 * init golden registers and rlc resume may override some registers,
	 * reconfig them here
	 */
	gfx_v11_0_tcp_harvest(adev);

	r = gfx_v11_0_cp_resume(adev);
	if (r)
		return r;

	/* get IMU version from HW if it's not set */
	if (!adev->gfx.imu_fw_version)
		adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);

	return r;
}

static int gfx_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	if (!adev->no_hw_access) {
		if (amdgpu_async_gfx_ring) {
			if (amdgpu_gfx_disable_kgq(adev, 0))
				DRM_ERROR("KGQ disable failed\n");
		}

		if (amdgpu_gfx_disable_kcq(adev, 0))
			DRM_ERROR("KCQ disable failed\n");

		amdgpu_mes_kiq_hw_fini(adev);
	}

	if (amdgpu_sriov_vf(adev))
		/* Remove the steps disabling CPG and clearing KIQ position,
		 * so that CP could perform IDLE-SAVE during switch. Those
		 * steps are necessary to avoid a DMAR error in gfx9 but are
		 * not reproduced on gfx11.
		 */
		return 0;

	gfx_v11_0_cp_enable(adev, false);
	gfx_v11_0_enable_gui_idle_interrupt(adev, false);

	adev->gfxhub.funcs->gart_disable(adev);

	adev->gfx.is_poweron = false;

	return 0;
}

static int gfx_v11_0_suspend(void *handle)
{
	return gfx_v11_0_hw_fini(handle);
}

static int gfx_v11_0_resume(void *handle)
{
	return gfx_v11_0_hw_init(handle);
}

static bool gfx_v11_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
			  GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v11_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
				      bool req)
{
	u32 i, tmp, val;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* Request with MeId=2, PipeId=0 */
		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);

		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
		if (req) {
			if (val == tmp)
				break;
		} else {
			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
					    REQUEST, 1);

			/* unlocked or locked by firmware */
			if (val != tmp)
				break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		return -EINVAL;

	return 0;
}

static int gfx_v11_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	int r, i, j, k;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				soc21_grbm_select(adev, i, k, j, 0);

				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
			}
		}
	}
	for (i = 0; i < adev->gfx.me.num_me; ++i) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				soc21_grbm_select(adev, i, k, j, 0);

				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
			}
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
	mutex_lock(&adev->gfx.reset_sem_mutex);
	r = gfx_v11_0_request_gfx_index_mutex(adev, true);
	if (r) {
		mutex_unlock(&adev->gfx.reset_sem_mutex);
		DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
		return r;
	}

	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);

	/*
	 * Read the CP_VMID_RESET register three times to give
	 * GFX_HQD_ACTIVE sufficient time to reach 0.
	 */
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);

	/* release the gfx mutex */
	r = gfx_v11_0_request_gfx_index_mutex(adev, false);
	mutex_unlock(&adev->gfx.reset_sem_mutex);
	if (r) {
		DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
		return r;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "Failed to wait for all pipes to become clean\n");
		return -EINVAL;
	}

	/********** trigger soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 1);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
	/********** exit soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 0);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);

	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);

	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "Failed to wait for CP_VMID_RESET to clear\n");
		return -EINVAL;
	}

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return gfx_v11_0_cp_resume(adev);
}

static bool gfx_v11_0_check_soft_reset(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	long tmo = msecs_to_jiffies(1000);
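	/*
	 * Health check used to decide whether a soft reset is needed: run a
	 * test IB on every gfx and compute ring and report a reset as soon
	 * as one of them fails or times out (1 second budget per ring).
	 */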
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	return false;
}

static int gfx_v11_0_post_soft_reset(void *handle)
{
	/*
	 * A GFX soft reset also impacts MES, so MES needs to be resumed
	 * after a GFX soft reset is performed.
	 */
	return amdgpu_mes_resume((struct amdgpu_device *)handle);
}

static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;
	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_gfx_off_ctrl(adev, false);
		mutex_lock(&adev->gfx.gpu_clock_mutex);
		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
		if (clock_counter_hi_pre != clock_counter_hi_after)
			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
		mutex_unlock(&adev->gfx.gpu_clock_mutex);
		amdgpu_gfx_off_ctrl(adev, true);
	} else {
		preempt_disable();
		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
		if (clock_counter_hi_pre != clock_counter_hi_after)
			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
		preempt_enable();
	}
	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);

	return clock;
}

static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					   uint32_t vmid,
					   uint32_t gds_base, uint32_t gds_size,
					   uint32_t gws_base, uint32_t gws_size,
					   uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
				    gds_base);

	/* GDS Size */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
				    gds_size);

	/* GWS */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
				    (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;

	adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);

	gfx_v11_0_set_kiq_pm4_funcs(adev);
	gfx_v11_0_set_ring_funcs(adev);
	gfx_v11_0_set_irq_funcs(adev);
	gfx_v11_0_set_gds_init(adev);
	gfx_v11_0_set_rlc_funcs(adev);
	gfx_v11_0_set_mqd_funcs(adev);
	gfx_v11_0_set_imu_funcs(adev);

	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);

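	/*
	 * This only requests the microcode images from userspace; the
	 * firmware is actually loaded onto the hardware later, during
	 * hw_init, according to the configured firmware load type
	 * (direct, PSP, or RLC backdoor autoload).
	 */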
return gfx_v11_0_init_microcode(adev); 5019 } 5020 5021 static int gfx_v11_0_late_init(void *handle) 5022 { 5023 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5024 int r; 5025 5026 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 5027 if (r) 5028 return r; 5029 5030 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 5031 if (r) 5032 return r; 5033 5034 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 5035 if (r) 5036 return r; 5037 return 0; 5038 } 5039 5040 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 5041 { 5042 uint32_t rlc_cntl; 5043 5044 /* if RLC is not enabled, do nothing */ 5045 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 5046 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 5047 } 5048 5049 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 5050 { 5051 uint32_t data; 5052 unsigned i; 5053 5054 data = RLC_SAFE_MODE__CMD_MASK; 5055 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 5056 5057 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 5058 5059 /* wait for RLC_SAFE_MODE */ 5060 for (i = 0; i < adev->usec_timeout; i++) { 5061 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 5062 RLC_SAFE_MODE, CMD)) 5063 break; 5064 udelay(1); 5065 } 5066 } 5067 5068 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) 5069 { 5070 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 5071 } 5072 5073 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 5074 bool enable) 5075 { 5076 uint32_t def, data; 5077 5078 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 5079 return; 5080 5081 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5082 5083 if (enable) 5084 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5085 else 5086 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5087 5088 if (def != data) 5089 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5090 } 5091 5092 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 5093 bool enable) 5094 { 5095 uint32_t def, data; 5096 5097 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 5098 return; 5099 5100 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5101 5102 if (enable) 5103 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5104 else 5105 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5106 5107 if (def != data) 5108 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5109 } 5110 5111 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 5112 bool enable) 5113 { 5114 uint32_t def, data; 5115 5116 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 5117 return; 5118 5119 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5120 5121 if (enable) 5122 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5123 else 5124 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5125 5126 if (def != data) 5127 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5128 } 5129 5130 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 5131 bool enable) 5132 { 5133 uint32_t data, def; 5134 5135 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 5136 return; 5137 5138 /* It is disabled by HW by default */ 5139 if (enable) { 5140 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5141 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 5142 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5143 5144 data &= 
~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5145 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5146 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5147 5148 if (def != data) 5149 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5150 } 5151 } else { 5152 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5153 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5154 5155 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5156 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5157 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5158 5159 if (def != data) 5160 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5161 } 5162 } 5163 } 5164 5165 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 5166 bool enable) 5167 { 5168 uint32_t def, data; 5169 5170 if (!(adev->cg_flags & 5171 (AMD_CG_SUPPORT_GFX_CGCG | 5172 AMD_CG_SUPPORT_GFX_CGLS | 5173 AMD_CG_SUPPORT_GFX_3D_CGCG | 5174 AMD_CG_SUPPORT_GFX_3D_CGLS))) 5175 return; 5176 5177 if (enable) { 5178 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5179 5180 /* unset CGCG override */ 5181 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5182 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 5183 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5184 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 5185 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 5186 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5187 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 5188 5189 /* update CGCG override bits */ 5190 if (def != data) 5191 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5192 5193 /* enable cgcg FSM(0x0000363F) */ 5194 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5195 5196 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 5197 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 5198 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5199 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5200 } 5201 5202 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 5203 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 5204 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5205 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5206 } 5207 5208 if (def != data) 5209 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5210 5211 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5212 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5213 5214 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 5215 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 5216 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5217 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5218 } 5219 5220 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 5221 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 5222 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5223 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5224 } 5225 5226 if (def != data) 5227 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5228 5229 /* set IDLE_POLL_COUNT(0x00900100) */ 5230 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 5231 5232 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 5233 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 5234 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 5235 5236 if (def != data) 5237 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 5238 5239 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5240 
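		/* Re-enable the CP context busy/empty and GFX idle interrupts
		 * so the RLC sees busy/idle transitions while coarse grain
		 * clock gating is active.
		 */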
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs have only one SDMA instance, so there is no need
		 * to configure SDMA1.
		 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	} else {
		/* Program RLC_CGCG_CGLS_CTRL */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);

		/* Program RLC_CGCG_CGLS_CTRL_3D */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs have only one SDMA instance, so there is no need
		 * to configure SDMA1.
		 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	}
}

static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);

	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);

	gfx_v11_0_update_repeater_fgcg(adev, enable);

	gfx_v11_0_update_sram_fgcg(adev, enable);

	gfx_v11_0_update_perf_clk(adev, enable);

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_CGLS |
	     AMD_CG_SUPPORT_GFX_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGLS))
		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return 0;
}

static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
	u32 reg, pre_data, data;

	amdgpu_gfx_off_ctrl(adev, false);
	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
		pre_data = RREG32_NO_KIQ(reg);
	else
		pre_data = RREG32(reg);

	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
			WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
		} else {
			WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
		}
	}
	amdgpu_gfx_off_ctrl(adev, true);

	if (ring
	    && amdgpu_sriov_is_pp_one_vf(adev)
	    && (pre_data != data)
	    && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
		|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
		amdgpu_ring_emit_wreg(ring, reg, data);
	}
}

static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
	.set_safe_mode = gfx_v11_0_set_safe_mode,
	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
	.init = gfx_v11_0_rlc_init,
	.get_csb_size = gfx_v11_0_get_csb_size,
	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
	.resume = gfx_v11_0_rlc_resume,
	.stop = gfx_v11_0_rlc_stop,
	.reset = gfx_v11_0_rlc_reset,
	.start = gfx_v11_0_rlc_start,
	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};

static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
{
	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;

	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);

	/* Program RLC_PG_DELAY_3 for CGPG hysteresis */
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 4):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 5, 1):
		case IP_VERSION(11, 5, 2):
			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
			break;
		default:
			break;
		}
	}
}

static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v11_cntl_power_gating(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}

static int gfx_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		if (!enable)
			amdgpu_gfx_off_ctrl(adev, false);

		gfx_v11_cntl_pg(adev, enable);

		if (enable)
			amdgpu_gfx_off_ctrl(adev, true);

		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11,
5, 2): 5455 gfx_v11_0_update_gfx_clock_gating(adev, 5456 state == AMD_CG_STATE_GATE); 5457 break; 5458 default: 5459 break; 5460 } 5461 5462 return 0; 5463 } 5464 5465 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags) 5466 { 5467 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5468 int data; 5469 5470 /* AMD_CG_SUPPORT_GFX_MGCG */ 5471 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5472 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5473 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5474 5475 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5476 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5477 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5478 5479 /* AMD_CG_SUPPORT_GFX_FGCG */ 5480 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5481 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5482 5483 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5484 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5485 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5486 5487 /* AMD_CG_SUPPORT_GFX_CGCG */ 5488 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5489 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5490 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5491 5492 /* AMD_CG_SUPPORT_GFX_CGLS */ 5493 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5494 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5495 5496 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5497 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5498 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5499 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5500 5501 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5502 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5503 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 5504 } 5505 5506 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 5507 { 5508 /* gfx11 is 32bit rptr*/ 5509 return *(uint32_t *)ring->rptr_cpu_addr; 5510 } 5511 5512 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 5513 { 5514 struct amdgpu_device *adev = ring->adev; 5515 u64 wptr; 5516 5517 /* XXX check if swapping is necessary on BE */ 5518 if (ring->use_doorbell) { 5519 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5520 } else { 5521 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 5522 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 5523 } 5524 5525 return wptr; 5526 } 5527 5528 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 5529 { 5530 struct amdgpu_device *adev = ring->adev; 5531 5532 if (ring->use_doorbell) { 5533 /* XXX check if swapping is necessary on BE */ 5534 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5535 ring->wptr); 5536 WDOORBELL64(ring->doorbell_index, ring->wptr); 5537 } else { 5538 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 5539 lower_32_bits(ring->wptr)); 5540 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 5541 upper_32_bits(ring->wptr)); 5542 } 5543 } 5544 5545 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 5546 { 5547 /* gfx11 hardware is 32bit rptr */ 5548 return *(uint32_t *)ring->rptr_cpu_addr; 5549 } 5550 5551 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 5552 { 5553 u64 wptr; 5554 5555 /* XXX check if swapping is necessary on BE */ 5556 if (ring->use_doorbell) 5557 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5558 else 5559 BUG(); 5560 return wptr; 5561 } 5562 5563 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 5564 { 5565 struct amdgpu_device *adev = ring->adev; 5566 5567 /* XXX check if swapping is necessary on BE */ 5568 if (ring->use_doorbell) { 5569 atomic64_set((atomic64_t 
*)ring->wptr_cpu_addr, 5570 ring->wptr); 5571 WDOORBELL64(ring->doorbell_index, ring->wptr); 5572 } else { 5573 BUG(); /* only DOORBELL method supported on gfx11 now */ 5574 } 5575 } 5576 5577 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 5578 { 5579 struct amdgpu_device *adev = ring->adev; 5580 u32 ref_and_mask, reg_mem_engine; 5581 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 5582 5583 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 5584 switch (ring->me) { 5585 case 1: 5586 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 5587 break; 5588 case 2: 5589 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 5590 break; 5591 default: 5592 return; 5593 } 5594 reg_mem_engine = 0; 5595 } else { 5596 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe; 5597 reg_mem_engine = 1; /* pfp */ 5598 } 5599 5600 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 5601 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 5602 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 5603 ref_and_mask, ref_and_mask, 0x20); 5604 } 5605 5606 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 5607 struct amdgpu_job *job, 5608 struct amdgpu_ib *ib, 5609 uint32_t flags) 5610 { 5611 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5612 u32 header, control = 0; 5613 5614 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 5615 5616 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5617 5618 control |= ib->length_dw | (vmid << 24); 5619 5620 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 5621 control |= INDIRECT_BUFFER_PRE_ENB(1); 5622 5623 if (flags & AMDGPU_IB_PREEMPTED) 5624 control |= INDIRECT_BUFFER_PRE_RESUME(1); 5625 5626 if (vmid) 5627 gfx_v11_0_ring_emit_de_meta(ring, 5628 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); 5629 } 5630 5631 if (ring->is_mes_queue) 5632 /* inherit vmid from mqd */ 5633 control |= 0x400000; 5634 5635 amdgpu_ring_write(ring, header); 5636 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5637 amdgpu_ring_write(ring, 5638 #ifdef __BIG_ENDIAN 5639 (2 << 0) | 5640 #endif 5641 lower_32_bits(ib->gpu_addr)); 5642 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5643 amdgpu_ring_write(ring, control); 5644 } 5645 5646 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 5647 struct amdgpu_job *job, 5648 struct amdgpu_ib *ib, 5649 uint32_t flags) 5650 { 5651 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5652 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 5653 5654 if (ring->is_mes_queue) 5655 /* inherit vmid from mqd */ 5656 control |= 0x40000000; 5657 5658 /* Currently, there is a high possibility to get wave ID mismatch 5659 * between ME and GDS, leading to a hw deadlock, because ME generates 5660 * different wave IDs than the GDS expects. This situation happens 5661 * randomly when at least 5 compute pipes use GDS ordered append. 5662 * The wave IDs generated by ME are also wrong after suspend/resume. 5663 * Those are probably bugs somewhere else in the kernel driver. 5664 * 5665 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 5666 * GDS to 0 for this ring (me/pipe). 
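	 * The SET_CONFIG_REG write below performs exactly that reset whenever
	 * an IB is submitted with AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID set.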
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

	/*
	 * The address must be Qword aligned for a 64-bit write, and Dword
	 * aligned when only the low 32 bits of data are sent (data high is
	 * discarded).
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, ring->is_mes_queue ?
			 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
}

static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}

static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	if (ring->is_mes_queue)
		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
	else
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}

	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
	 * changed in any way. Clearing the cached offset and pointer forces
	 * the next gfx_v11_0_ring_emit_gfx_shadow() call to emit a full,
	 * unconditional SET_Q_MODE packet.
	 */
	ring->set_q_mode_offs = 0;
	ring->set_q_mode_ptr = NULL;
}

static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
						   uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	/* discard following DWs if *cond_exec_gpu_addr==0 */
	amdgpu_ring_write(ring, 0);
	ret = ring->wptr & ring->buf_mask;
	/* patch dummy value later */
	amdgpu_ring_write(ring, 0);

	return ret;
}

static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
					   u64 shadow_va, u64 csa_va,
					   u64 gds_va, bool init_shadow,
					   int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int offs, end;

	if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
		return;

	/*
	 * The logic here isn't easy to understand because we need to keep
	 * state across multiple executions of the function as well as
	 * between the CPU and GPU. The general idea is that the newly
	 * written GPU command has a condition on the previous one and is
	 * only executed if really necessary.
	 */

	/*
	 * The dw in the NOP controls if the next SET_Q_MODE packet should be
	 * executed or not. Reserve 64 bits just to be on the safe side.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
	offs = ring->wptr & ring->buf_mask;

	/*
	 * We start with skipping the prefix SET_Q_MODE and always executing
	 * the postfix SET_Q_MODE packet. This is changed below with a
	 * WRITE_DATA command when the postfix is executed.
	 */
	amdgpu_ring_write(ring, shadow_va ?
1 : 0); 5859 amdgpu_ring_write(ring, 0); 5860 5861 if (ring->set_q_mode_offs) { 5862 uint64_t addr; 5863 5864 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 5865 addr += ring->set_q_mode_offs << 2; 5866 end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr); 5867 } 5868 5869 /* 5870 * When the postfix SET_Q_MODE packet executes we need to make sure that the 5871 * next prefix SET_Q_MODE packet executes as well. 5872 */ 5873 if (!shadow_va) { 5874 uint64_t addr; 5875 5876 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 5877 addr += offs << 2; 5878 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5879 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); 5880 amdgpu_ring_write(ring, lower_32_bits(addr)); 5881 amdgpu_ring_write(ring, upper_32_bits(addr)); 5882 amdgpu_ring_write(ring, 0x1); 5883 } 5884 5885 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7)); 5886 amdgpu_ring_write(ring, lower_32_bits(shadow_va)); 5887 amdgpu_ring_write(ring, upper_32_bits(shadow_va)); 5888 amdgpu_ring_write(ring, lower_32_bits(gds_va)); 5889 amdgpu_ring_write(ring, upper_32_bits(gds_va)); 5890 amdgpu_ring_write(ring, lower_32_bits(csa_va)); 5891 amdgpu_ring_write(ring, upper_32_bits(csa_va)); 5892 amdgpu_ring_write(ring, shadow_va ? 5893 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0); 5894 amdgpu_ring_write(ring, init_shadow ? 5895 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0); 5896 5897 if (ring->set_q_mode_offs) 5898 amdgpu_ring_patch_cond_exec(ring, end); 5899 5900 if (shadow_va) { 5901 uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid; 5902 5903 /* 5904 * If the tokens match try to skip the last postfix SET_Q_MODE 5905 * packet to avoid saving/restoring the state all the time. 5906 */ 5907 if (ring->set_q_mode_ptr && ring->set_q_mode_token == token) 5908 *ring->set_q_mode_ptr = 0; 5909 5910 ring->set_q_mode_token = token; 5911 } else { 5912 ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs]; 5913 } 5914 5915 ring->set_q_mode_offs = offs; 5916 } 5917 5918 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) 5919 { 5920 int i, r = 0; 5921 struct amdgpu_device *adev = ring->adev; 5922 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 5923 struct amdgpu_ring *kiq_ring = &kiq->ring; 5924 unsigned long flags; 5925 5926 if (adev->enable_mes) 5927 return -EINVAL; 5928 5929 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 5930 return -EINVAL; 5931 5932 spin_lock_irqsave(&kiq->ring_lock, flags); 5933 5934 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 5935 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5936 return -ENOMEM; 5937 } 5938 5939 /* assert preemption condition */ 5940 amdgpu_ring_set_preempt_cond_exec(ring, false); 5941 5942 /* assert IB preemption, emit the trailing fence */ 5943 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 5944 ring->trail_fence_gpu_addr, 5945 ++ring->trail_seq); 5946 amdgpu_ring_commit(kiq_ring); 5947 5948 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5949 5950 /* poll the trailing fence */ 5951 for (i = 0; i < adev->usec_timeout; i++) { 5952 if (ring->trail_seq == 5953 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 5954 break; 5955 udelay(1); 5956 } 5957 5958 if (i >= adev->usec_timeout) { 5959 r = -EINVAL; 5960 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 5961 } 5962 5963 /* deassert preemption condition */ 5964 amdgpu_ring_set_preempt_cond_exec(ring, true); 5965 return r; 5966 } 5967 5968 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 5969 { 5970 struct 
amdgpu_device *adev = ring->adev; 5971 struct v10_de_ib_state de_payload = {0}; 5972 uint64_t offset, gds_addr, de_payload_gpu_addr; 5973 void *de_payload_cpu_addr; 5974 int cnt; 5975 5976 if (ring->is_mes_queue) { 5977 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 5978 gfx[0].gfx_meta_data) + 5979 offsetof(struct v10_gfx_meta_data, de_payload); 5980 de_payload_gpu_addr = 5981 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 5982 de_payload_cpu_addr = 5983 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 5984 5985 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 5986 gfx[0].gds_backup) + 5987 offsetof(struct v10_gfx_meta_data, de_payload); 5988 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 5989 } else { 5990 offset = offsetof(struct v10_gfx_meta_data, de_payload); 5991 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; 5992 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; 5993 5994 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + 5995 AMDGPU_CSA_SIZE - adev->gds.gds_size, 5996 PAGE_SIZE); 5997 } 5998 5999 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 6000 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 6001 6002 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 6003 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 6004 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 6005 WRITE_DATA_DST_SEL(8) | 6006 WR_CONFIRM) | 6007 WRITE_DATA_CACHE_POLICY(0)); 6008 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); 6009 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); 6010 6011 if (resume) 6012 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, 6013 sizeof(de_payload) >> 2); 6014 else 6015 amdgpu_ring_write_multiple(ring, (void *)&de_payload, 6016 sizeof(de_payload) >> 2); 6017 } 6018 6019 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, 6020 bool secure) 6021 { 6022 uint32_t v = secure ? FRAME_TMZ : 0; 6023 6024 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 6025 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 
0 : 1)); 6026 } 6027 6028 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 6029 uint32_t reg_val_offs) 6030 { 6031 struct amdgpu_device *adev = ring->adev; 6032 6033 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 6034 amdgpu_ring_write(ring, 0 | /* src: register*/ 6035 (5 << 8) | /* dst: memory */ 6036 (1 << 20)); /* write confirm */ 6037 amdgpu_ring_write(ring, reg); 6038 amdgpu_ring_write(ring, 0); 6039 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 6040 reg_val_offs * 4)); 6041 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 6042 reg_val_offs * 4)); 6043 } 6044 6045 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 6046 uint32_t val) 6047 { 6048 uint32_t cmd = 0; 6049 6050 switch (ring->funcs->type) { 6051 case AMDGPU_RING_TYPE_GFX: 6052 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 6053 break; 6054 case AMDGPU_RING_TYPE_KIQ: 6055 cmd = (1 << 16); /* no inc addr */ 6056 break; 6057 default: 6058 cmd = WR_CONFIRM; 6059 break; 6060 } 6061 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6062 amdgpu_ring_write(ring, cmd); 6063 amdgpu_ring_write(ring, reg); 6064 amdgpu_ring_write(ring, 0); 6065 amdgpu_ring_write(ring, val); 6066 } 6067 6068 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 6069 uint32_t val, uint32_t mask) 6070 { 6071 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 6072 } 6073 6074 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 6075 uint32_t reg0, uint32_t reg1, 6076 uint32_t ref, uint32_t mask) 6077 { 6078 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 6079 6080 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 6081 ref, mask, 0x20); 6082 } 6083 6084 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, 6085 unsigned vmid) 6086 { 6087 struct amdgpu_device *adev = ring->adev; 6088 uint32_t value = 0; 6089 6090 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 6091 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 6092 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 6093 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 6094 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 6095 WREG32_SOC15(GC, 0, regSQ_CMD, value); 6096 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 6097 } 6098 6099 static void 6100 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 6101 uint32_t me, uint32_t pipe, 6102 enum amdgpu_interrupt_state state) 6103 { 6104 uint32_t cp_int_cntl, cp_int_cntl_reg; 6105 6106 if (!me) { 6107 switch (pipe) { 6108 case 0: 6109 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 6110 break; 6111 case 1: 6112 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 6113 break; 6114 default: 6115 DRM_DEBUG("invalid pipe %d\n", pipe); 6116 return; 6117 } 6118 } else { 6119 DRM_DEBUG("invalid me %d\n", me); 6120 return; 6121 } 6122 6123 switch (state) { 6124 case AMDGPU_IRQ_STATE_DISABLE: 6125 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6126 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6127 TIME_STAMP_INT_ENABLE, 0); 6128 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6129 GENERIC0_INT_ENABLE, 0); 6130 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6131 break; 6132 case AMDGPU_IRQ_STATE_ENABLE: 6133 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6134 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6135 TIME_STAMP_INT_ENABLE, 1); 6136 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6137 
GENERIC0_INT_ENABLE, 1); 6138 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6139 break; 6140 default: 6141 break; 6142 } 6143 } 6144 6145 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 6146 int me, int pipe, 6147 enum amdgpu_interrupt_state state) 6148 { 6149 u32 mec_int_cntl, mec_int_cntl_reg; 6150 6151 /* 6152 * amdgpu controls only the first MEC. That's why this function only 6153 * handles the setting of interrupts for this specific MEC. All other 6154 * pipes' interrupts are set by amdkfd. 6155 */ 6156 6157 if (me == 1) { 6158 switch (pipe) { 6159 case 0: 6160 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6161 break; 6162 case 1: 6163 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 6164 break; 6165 case 2: 6166 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 6167 break; 6168 case 3: 6169 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 6170 break; 6171 default: 6172 DRM_DEBUG("invalid pipe %d\n", pipe); 6173 return; 6174 } 6175 } else { 6176 DRM_DEBUG("invalid me %d\n", me); 6177 return; 6178 } 6179 6180 switch (state) { 6181 case AMDGPU_IRQ_STATE_DISABLE: 6182 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6183 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6184 TIME_STAMP_INT_ENABLE, 0); 6185 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6186 GENERIC0_INT_ENABLE, 0); 6187 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6188 break; 6189 case AMDGPU_IRQ_STATE_ENABLE: 6190 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6191 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6192 TIME_STAMP_INT_ENABLE, 1); 6193 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6194 GENERIC0_INT_ENABLE, 1); 6195 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6196 break; 6197 default: 6198 break; 6199 } 6200 } 6201 6202 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, 6203 struct amdgpu_irq_src *src, 6204 unsigned type, 6205 enum amdgpu_interrupt_state state) 6206 { 6207 switch (type) { 6208 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 6209 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 6210 break; 6211 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 6212 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 6213 break; 6214 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 6215 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 6216 break; 6217 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 6218 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 6219 break; 6220 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 6221 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 6222 break; 6223 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 6224 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 6225 break; 6226 default: 6227 break; 6228 } 6229 return 0; 6230 } 6231 6232 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, 6233 struct amdgpu_irq_src *source, 6234 struct amdgpu_iv_entry *entry) 6235 { 6236 int i; 6237 u8 me_id, pipe_id, queue_id; 6238 struct amdgpu_ring *ring; 6239 uint32_t mes_queue_id = entry->src_data[0]; 6240 6241 DRM_DEBUG("IH: CP EOP\n"); 6242 6243 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 6244 struct amdgpu_mes_queue *queue; 6245 6246 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 6247 6248 spin_lock(&adev->mes.queue_id_lock); 6249 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 6250 if 
(queue) { 6251 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 6252 amdgpu_fence_process(queue->ring); 6253 } 6254 spin_unlock(&adev->mes.queue_id_lock); 6255 } else { 6256 me_id = (entry->ring_id & 0x0c) >> 2; 6257 pipe_id = (entry->ring_id & 0x03) >> 0; 6258 queue_id = (entry->ring_id & 0x70) >> 4; 6259 6260 switch (me_id) { 6261 case 0: 6262 if (pipe_id == 0) 6263 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 6264 else 6265 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 6266 break; 6267 case 1: 6268 case 2: 6269 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6270 ring = &adev->gfx.compute_ring[i]; 6271 /* Per-queue interrupt is supported for MEC starting from VI. 6272 * The interrupt can only be enabled/disabled per pipe instead 6273 * of per queue. 6274 */ 6275 if ((ring->me == me_id) && 6276 (ring->pipe == pipe_id) && 6277 (ring->queue == queue_id)) 6278 amdgpu_fence_process(ring); 6279 } 6280 break; 6281 } 6282 } 6283 6284 return 0; 6285 } 6286 6287 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 6288 struct amdgpu_irq_src *source, 6289 unsigned int type, 6290 enum amdgpu_interrupt_state state) 6291 { 6292 u32 cp_int_cntl_reg, cp_int_cntl; 6293 int i, j; 6294 6295 switch (state) { 6296 case AMDGPU_IRQ_STATE_DISABLE: 6297 case AMDGPU_IRQ_STATE_ENABLE: 6298 for (i = 0; i < adev->gfx.me.num_me; i++) { 6299 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6300 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6301 6302 if (cp_int_cntl_reg) { 6303 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6304 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6305 PRIV_REG_INT_ENABLE, 6306 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6307 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6308 } 6309 } 6310 } 6311 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6312 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6313 /* MECs start at 1 */ 6314 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6315 6316 if (cp_int_cntl_reg) { 6317 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6318 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6319 PRIV_REG_INT_ENABLE, 6320 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6321 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6322 } 6323 } 6324 } 6325 break; 6326 default: 6327 break; 6328 } 6329 6330 return 0; 6331 } 6332 6333 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev, 6334 struct amdgpu_irq_src *source, 6335 unsigned type, 6336 enum amdgpu_interrupt_state state) 6337 { 6338 u32 cp_int_cntl_reg, cp_int_cntl; 6339 int i, j; 6340 6341 switch (state) { 6342 case AMDGPU_IRQ_STATE_DISABLE: 6343 case AMDGPU_IRQ_STATE_ENABLE: 6344 for (i = 0; i < adev->gfx.me.num_me; i++) { 6345 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6346 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6347 6348 if (cp_int_cntl_reg) { 6349 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6350 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6351 OPCODE_ERROR_INT_ENABLE, 6352 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 6353 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6354 } 6355 } 6356 } 6357 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6358 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6359 /* MECs start at 1 */ 6360 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6361 6362 if (cp_int_cntl_reg) { 6363 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6364 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6365 OPCODE_ERROR_INT_ENABLE, 6366 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6367 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6368 } 6369 } 6370 } 6371 break; 6372 default: 6373 break; 6374 } 6375 return 0; 6376 } 6377 6378 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 6379 struct amdgpu_irq_src *source, 6380 unsigned int type, 6381 enum amdgpu_interrupt_state state) 6382 { 6383 u32 cp_int_cntl_reg, cp_int_cntl; 6384 int i, j; 6385 6386 switch (state) { 6387 case AMDGPU_IRQ_STATE_DISABLE: 6388 case AMDGPU_IRQ_STATE_ENABLE: 6389 for (i = 0; i < adev->gfx.me.num_me; i++) { 6390 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6391 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6392 6393 if (cp_int_cntl_reg) { 6394 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6395 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6396 PRIV_INSTR_INT_ENABLE, 6397 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6398 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6399 } 6400 } 6401 } 6402 break; 6403 default: 6404 break; 6405 } 6406 6407 return 0; 6408 } 6409 6410 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, 6411 struct amdgpu_iv_entry *entry) 6412 { 6413 u8 me_id, pipe_id, queue_id; 6414 struct amdgpu_ring *ring; 6415 int i; 6416 6417 me_id = (entry->ring_id & 0x0c) >> 2; 6418 pipe_id = (entry->ring_id & 0x03) >> 0; 6419 queue_id = (entry->ring_id & 0x70) >> 4; 6420 6421 switch (me_id) { 6422 case 0: 6423 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6424 ring = &adev->gfx.gfx_ring[i]; 6425 if (ring->me == me_id && ring->pipe == pipe_id && 6426 ring->queue == queue_id) 6427 drm_sched_fault(&ring->sched); 6428 } 6429 break; 6430 case 1: 6431 case 2: 6432 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6433 ring = &adev->gfx.compute_ring[i]; 6434 if (ring->me == me_id && ring->pipe == pipe_id && 6435 ring->queue == queue_id) 6436 drm_sched_fault(&ring->sched); 6437 } 6438 break; 6439 default: 6440 BUG(); 6441 break; 6442 } 6443 } 6444 6445 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, 6446 struct amdgpu_irq_src *source, 6447 struct amdgpu_iv_entry *entry) 6448 { 6449 DRM_ERROR("Illegal register access in command stream\n"); 6450 gfx_v11_0_handle_priv_fault(adev, entry); 6451 return 0; 6452 } 6453 6454 static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev, 6455 struct amdgpu_irq_src *source, 6456 struct amdgpu_iv_entry *entry) 6457 { 6458 DRM_ERROR("Illegal opcode in command stream \n"); 6459 gfx_v11_0_handle_priv_fault(adev, entry); 6460 return 0; 6461 } 6462 6463 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, 6464 struct amdgpu_irq_src *source, 6465 struct amdgpu_iv_entry *entry) 6466 { 6467 DRM_ERROR("Illegal instruction in command stream\n"); 6468 gfx_v11_0_handle_priv_fault(adev, entry); 6469 return 0; 6470 } 6471 6472 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev, 6473 struct amdgpu_irq_src *source, 6474 struct amdgpu_iv_entry *entry) 6475 { 6476 if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq) 6477 return 
adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

	return 0;
}

#if 0
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}
#endif

static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}

static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
	if (r)
		return r;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0)) {
		dev_err(adev->dev, "failed to reserve mqd_obj\n");
		return r;
	}
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v11_0_kgq_init_queue(ring, true);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r) {
		dev_err(adev->dev, "failed to init kgq\n");
		return r;
	}

	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kgq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int i, r = 0;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
	WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);

	/* make sure the dequeue is complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	if (r) {
		dev_err(adev->dev, "failed to wait for hqd deactivation\n");
		return r;
	}

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0)) {
		dev_err(adev->dev, "failed to reserve mqd_obj\n");
		return r;
	}
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v11_0_kcq_init_queue(ring, true);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r) {
		dev_err(adev->dev, "failed to init kcq\n");
		return r;
	}
	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kcq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	for (i = 0; i < reg_count; i++)
		drm_printf(p, "%-50s \t 0x%08x\n",
			   gc_reg_list_11_0[i].reg_name,
			   adev->gfx.ip_dump_core[i]);

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_cp_reg_list_11[reg].reg_name,
						   adev->gfx.ip_dump_compute_queues[index + reg]);
				}
index += reg_count; 6701 } 6702 } 6703 } 6704 } 6705 6706 static void gfx_v11_ip_dump(void *handle) 6707 { 6708 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6709 uint32_t i, j, k, reg, index = 0; 6710 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); 6711 6712 if (!adev->gfx.ip_dump_core) 6713 return; 6714 6715 amdgpu_gfx_off_ctrl(adev, false); 6716 for (i = 0; i < reg_count; i++) 6717 adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i])); 6718 amdgpu_gfx_off_ctrl(adev, true); 6719 6720 /* dump compute queue registers for all instances */ 6721 if (!adev->gfx.ip_dump_compute_queues) 6722 return; 6723 6724 reg_count = ARRAY_SIZE(gc_cp_reg_list_11); 6725 amdgpu_gfx_off_ctrl(adev, false); 6726 mutex_lock(&adev->srbm_mutex); 6727 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6728 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6729 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 6730 /* ME0 is for GFX so start from 1 for CP */ 6731 soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); 6732 for (reg = 0; reg < reg_count; reg++) { 6733 adev->gfx.ip_dump_compute_queues[index + reg] = 6734 RREG32(SOC15_REG_ENTRY_OFFSET( 6735 gc_cp_reg_list_11[reg])); 6736 } 6737 index += reg_count; 6738 } 6739 } 6740 } 6741 soc21_grbm_select(adev, 0, 0, 0, 0); 6742 mutex_unlock(&adev->srbm_mutex); 6743 amdgpu_gfx_off_ctrl(adev, true); 6744 6745 /* dump gfx queue registers for all instances */ 6746 if (!adev->gfx.ip_dump_gfx_queues) 6747 return; 6748 6749 index = 0; 6750 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 6751 amdgpu_gfx_off_ctrl(adev, false); 6752 mutex_lock(&adev->srbm_mutex); 6753 for (i = 0; i < adev->gfx.me.num_me; i++) { 6754 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6755 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 6756 soc21_grbm_select(adev, i, j, k, 0); 6757 6758 for (reg = 0; reg < reg_count; reg++) { 6759 adev->gfx.ip_dump_gfx_queues[index + reg] = 6760 RREG32(SOC15_REG_ENTRY_OFFSET( 6761 gc_gfx_queue_reg_list_11[reg])); 6762 } 6763 index += reg_count; 6764 } 6765 } 6766 } 6767 soc21_grbm_select(adev, 0, 0, 0, 0); 6768 mutex_unlock(&adev->srbm_mutex); 6769 amdgpu_gfx_off_ctrl(adev, true); 6770 } 6771 6772 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { 6773 .name = "gfx_v11_0", 6774 .early_init = gfx_v11_0_early_init, 6775 .late_init = gfx_v11_0_late_init, 6776 .sw_init = gfx_v11_0_sw_init, 6777 .sw_fini = gfx_v11_0_sw_fini, 6778 .hw_init = gfx_v11_0_hw_init, 6779 .hw_fini = gfx_v11_0_hw_fini, 6780 .suspend = gfx_v11_0_suspend, 6781 .resume = gfx_v11_0_resume, 6782 .is_idle = gfx_v11_0_is_idle, 6783 .wait_for_idle = gfx_v11_0_wait_for_idle, 6784 .soft_reset = gfx_v11_0_soft_reset, 6785 .check_soft_reset = gfx_v11_0_check_soft_reset, 6786 .post_soft_reset = gfx_v11_0_post_soft_reset, 6787 .set_clockgating_state = gfx_v11_0_set_clockgating_state, 6788 .set_powergating_state = gfx_v11_0_set_powergating_state, 6789 .get_clockgating_state = gfx_v11_0_get_clockgating_state, 6790 .dump_ip_state = gfx_v11_ip_dump, 6791 .print_ip_state = gfx_v11_ip_print, 6792 }; 6793 6794 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 6795 .type = AMDGPU_RING_TYPE_GFX, 6796 .align_mask = 0xff, 6797 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6798 .support_64bit_ptrs = true, 6799 .secure_submission_supported = true, 6800 .get_rptr = gfx_v11_0_ring_get_rptr_gfx, 6801 .get_wptr = gfx_v11_0_ring_get_wptr_gfx, 6802 .set_wptr = gfx_v11_0_ring_set_wptr_gfx, 6803 .emit_frame_size = /* totally 247 maximum if 16 IBs */ 
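	/* worst-case dwords emitted for a single frame, excluding the IBs: */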
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 247 maximum if 16 IBs */
		5 + /* update_spm_vmid */
		5 + /* COND_EXEC */
		22 + /* SET_Q_PREEMPTION_MODE */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		4 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		22 + /* SET_Q_PREEMPTION_MODE */
		8 + 8 + /* FENCE x2 */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = gfx_v11_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
	.reset = gfx_v11_0_reset_kgq,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		5 + /* update_spm_vmid */
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = gfx_v11_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
	.reset = gfx_v11_0_reset_kcq,
};
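/*
 * The KIQ ring reuses the compute rptr/wptr helpers but emits its fences
 * via gfx_v11_0_ring_emit_fence_kiq, and additionally provides register
 * read/write emission (.emit_rreg/.emit_wreg) for KIQ-based register access.
 */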
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
	.set = gfx_v11_0_set_bad_op_fault_state,
	.process = gfx_v11_0_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}

static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}
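/*
 * GDS/GWS/OA allocations use fixed sizes on GFX11; the maximum compute
 * wave id is derived from the total CU count, allowing 32 waves per CU.
 */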
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
			    adev->gfx.config.max_sh_per_se *
			    adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}

static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}
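/*
 * Build the per-SE/SH CU bitmaps and count the active CUs reported to
 * userspace via the AMDGPU_INFO ioctl; see the layout comment below for
 * how SEs beyond the first four are folded into the 4x4 bitmap.
 */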
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			bitmap = i * adev->gfx.config.max_sh_per_se + j;
			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
				continue;
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/**
			 * GFX11 can support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the CU mask for SEs 4 to 7; the bitmap layout is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};