/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_cleaner_shader.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"

#define GFX11_NUM_GFX_RINGS	1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
#define regPC_CONFIG_CNTL_1		0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX	1

#define regCP_GFX_MQD_CONTROL_DEFAULT			0x00000100
#define regCP_GFX_HQD_VMID_DEFAULT			0x00000000
#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT		0x00000000
#define regCP_GFX_HQD_QUANTUM_DEFAULT			0x00000a01
#define regCP_GFX_HQD_CNTL_DEFAULT			0x00a00000
#define regCP_RB_DOORBELL_CONTROL_DEFAULT		0x00000000
#define regCP_GFX_HQD_RPTR_DEFAULT			0x00000000

#define regCP_HQD_EOP_CONTROL_DEFAULT			0x00000006
#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT		0x00000000
#define regCP_MQD_CONTROL_DEFAULT			0x00000100
#define regCP_HQD_PQ_CONTROL_DEFAULT			0x00308509
#define regCP_HQD_PQ_RPTR_DEFAULT			0x00000000
#define regCP_HQD_PERSISTENT_STATE_DEFAULT		0x0be05501
#define regCP_HQD_IB_CONTROL_DEFAULT			0x00300000

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");

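/*
 * The register lists below are read back into the IP-dump buffers allocated
 * in gfx_v11_0_alloc_ip_dump(), so debugfs/devcoredump can report GC state;
 * SOC15_REG_ENTRY_STR() records both the register offset and its name.
 */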
static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
	/* compute registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
};

static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
	/* gfx queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};

static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

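/*
 * MAP_QUEUES targets a microengine via me/eng_sel: gfx rings use ME0 with
 * eng_sel=4, compute rings use the MEC (me=1) with eng_sel=0, and MES queues
 * use me=2 with eng_sel=5, matching the switch below.
 */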
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}

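/*
 * Golden settings are per-IP register overrides programmed once at init;
 * they are skipped under SR-IOV, where the host is expected to program them.
 */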
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	soc15_program_register_sequence(adev,
					golden_settings_gc_11_0,
					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}

static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nops - 1 NOP packets */
	amdgpu_ring_insert_nop(ring, num_nop - 1);
}

static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

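/*
 * The IB test mirrors the ring test but routes the write through an indirect
 * buffer: a WRITE_DATA packet stores 0xDEADBEEF to a writeback slot, and the
 * CPU checks that slot after the fence signals.
 */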
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't support indirect buffers for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}

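/*
 * Firmware files are requested as amdgpu/<prefix>_<block>.bin, where
 * <prefix> (e.g. gc_11_0_0) comes from amdgpu_ucode_ip_version_decode();
 * the PFP header version then selects between the RS64 and legacy CP paths.
 */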
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[25];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_pfp.bin", ucode_prefix);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_me.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
		    adev->pdev->revision == 0xCE)
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/gc_11_0_0_rlc_1.bin");
		else
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);

	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
		err = adev->gfx.imu.funcs->init_microcode(adev);
		if (err)
			DRM_ERROR("Failed to init imu firmware!\n");
		return err;
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

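/*
 * RLCG register access routes privileged GC register reads/writes through
 * the RLC scratch registers and GRBM_GFX_CNTL/INDEX; this is mainly used
 * under SR-IOV, where the guest cannot touch such registers directly.
 */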
static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

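/*
 * Wave state is fetched through the SQ indirect interface above:
 * SQ_IND_INDEX selects the wave and register (optionally auto-incrementing)
 * and SQ_IND_DATA returns the values; this feeds the debugfs wave dump.
 */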
static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id, uint32_t simd,
				     uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256

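/*
 * Reports the CP gfx shadow area sizes (per-queue MQD shadow plus firmware
 * work area) that userspace can allocate, e.g. for mid-command-buffer
 * preemption; returns -ENOTSUPP when the CP firmware lacks shadow support.
 */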
static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	struct amdgpu_ring *ring;
	unsigned int irq_type;
	unsigned int hw_prio;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
		AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

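/*
 * Compute rings live on MEC pipes (mec0 is me1 to the CP). The
 * doorbell_index assignments are in 64-bit doorbell slots, so the << 1
 * converts them to the dword-based index the ring code expects.
 */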
static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}

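/*
 * Besides copying the TOC image itself, the helper below stores the
 * enabled-firmware bitmask in the last qword of the TOC so the RLC
 * bootloader knows which autoload entries were populated.
 */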
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
							   fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
							   fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
							   fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
							   fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
								   fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
								   fw_data, fw_size, fw_autoload_mask);
		}
	}
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}

static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4 init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5 disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}

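/*
 * Scratch buffers for the register lists defined at the top of the file;
 * the compute/gfx queue dumps are sized as reg_count times the number of
 * queue instances so each queue's state can be captured separately.
 */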
static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4: init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5: disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}

static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
	uint32_t *ptr;
	uint32_t inst;

	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}

	/* Allocate memory for gfx queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
		adev->gfx.me.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
		adev->gfx.ip_dump_gfx_queues = NULL;
	} else {
		adev->gfx.ip_dump_gfx_queues = ptr;
	}
}
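/*
 * sw_init: set up the per-ASIC me/mec topology, register the IRQ
 * sources, create the gfx and compute rings, and allocate the
 * KIQ/MQD backing store.  For GC 11.0.0/2/3 the topology below gives
 * 1 MEC x 4 pipes x 4 queues, i.e. 16 compute queues to spread
 * across the pipes.
 */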
static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id = 0;
	int xcc_id = 0;
	struct amdgpu_device *adev = ip_block->adev;

	INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
		if (adev->gfx.me_fw_version >= 2280 &&
		    adev->gfx.pfp_fw_version >= 2370 &&
		    adev->gfx.mec_fw_version >= 2450 &&
		    adev->mes.fw_version[0] >= 99) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 26 &&
		    adev->mes.fw_version[0] >= 114) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
	    amdgpu_sriov_is_pp_one_vf(adev))
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* FED error */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
			      &adev->gfx.rlc_gc_fed_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v11_0_me_init(adev);

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	adev->gfx.gfx_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 2280) &&
		    (adev->gfx.mec_fw_version >= 2410)) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
		}
		break;
	default:
		break;
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v11_0_gpu_early_init(adev);
	if (r)
		return r;

	if (amdgpu_gfx_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
		return -EINVAL;
	}

	gfx_v11_0_alloc_ip_dump(adev);

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	return 0;
}
static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v11_0_pfp_fini(adev);
	gfx_v11_0_me_fini(adev);
	gfx_v11_0_rlc_fini(adev);
	gfx_v11_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v11_0_rlc_autoload_buffer_fini(adev);

	gfx_v11_0_free_microcode(adev);

	amdgpu_gfx_sysfs_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);
	kfree(adev->gfx.ip_dump_gfx_queues);

	return 0;
}

static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
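/*
 * Derive the set of usable shader arrays (SAs): start from a full
 * bitmask of max_sh_per_se * max_shader_engines bits and clear
 * anything disabled by fuses (CC_*) or by user config (GC_USER_*).
 * E.g. (illustrative) 2 SAs per SE on a 2-SE part gives a 4-bit mask.
 */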
static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
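/*
 * Worked example for the remapping below (illustrative numbers): with
 * 2 SEs, 2 SAs per SE and 4 render backends per SE,
 * rb_bitmap_width_per_sa = 4 / 2 = 2 and rb_bitmap_per_sa = 0x3, so
 * each active SA i contributes 0x3 << (i * 2) before the global RB
 * disable mask is applied.
 */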
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_per_sa;
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);

	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap &= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		       SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
}
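/*
 * Note on the 2 * vmid offsets used here and above: the per-VMID GDS
 * BASE/SIZE registers are laid out as adjacent pairs, so the dword
 * offset advances by two per VMID, while GWS and OA use a single
 * register per VMID.
 */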
static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}

static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* Set whether texture coordinate truncation is conformant. */
	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
	adev->gfx.config.ta_cntl2_truncate_coord_mode =
		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
}
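/*
 * Helpers mapping a {me, pipe} pair to its CP interrupt-control
 * register: the CPG helper covers only ME0 (graphics) rings, and the
 * CPC helper covers only MEC1, since the remaining compute pipes are
 * managed by amdkfd.
 */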
static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
				      int me, int pipe)
{
	if (me != 0)
		return 0;

	switch (pipe) {
	case 0:
		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
	case 1:
		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
	default:
		return 0;
	}
}

static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
				      int me, int pipe)
{
	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me != 1)
		return 0;

	switch (pipe) {
	case 0:
		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	case 1:
		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
	case 2:
		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
	case 3:
		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
	default:
		return 0;
	}
}

static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp, cp_int_cntl_reg;
	int i, j;

	if (amdgpu_sriov_vf(adev))
		return;

	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);

			if (cp_int_cntl_reg) {
				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
						    enable ? 1 : 0);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
						    enable ? 1 : 0);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
						    enable ? 1 : 0);
				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
						    enable ? 1 : 0);
				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
			}
		}
	}
}

static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}

static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
}

static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable rlc & smu handshake until smu
	 * and gfxoff feature works as expected */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
}
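/*
 * All of the direct loaders below share one idiom: write a start
 * offset to the block's *_UCODE_ADDR register, stream the image one
 * dword at a time through *_UCODE_DATA (the hardware address
 * auto-increments), and finish by writing the firmware version to
 * the ADDR register.
 */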
static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
}

static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
}

static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
	tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
	WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
	tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
}
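/*
 * The RLC header minor version gates the optional images: minor >= 2
 * carries the LX6 IRAM/DRAM images and minor 3 also carries the
 * RLCP/RLCV images loaded above; both sets are only loaded when DPM
 * is enabled.
 */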
static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	uint16_t version_major;
	uint16_t version_minor;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);

	if (version_major == 2) {
		gfx_v11_0_load_rlcg_microcode(adev);
		if (amdgpu_dpm == 1) {
			if (version_minor >= 2)
				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
			if (version_minor == 3)
				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
		}

		return 0;
	}

	return -EINVAL;
}

static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		gfx_v11_0_init_csb(adev);

		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
			gfx_v11_0_rlc_enable_srm(adev);
	} else {
		if (amdgpu_sriov_vf(adev)) {
			gfx_v11_0_init_csb(adev);
			return 0;
		}

		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v11_0_rlc_load_microcode(adev);
			if (r)
				return r;
		}

		gfx_v11_0_init_csb(adev);

		adev->gfx.rlc.funcs->start(adev);
	}
	return 0;
}

static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		amdgpu_device_flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/* Program me ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}
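/*
 * The PFP and MEC variants below repeat the same invalidate-then-poll
 * handshake as gfx_v11_0_config_me_cache above.  A minimal sketch of
 * the shared pattern (hypothetical helper, not part of this driver;
 * the register and field names stand in for the per-block ones):
 *
 *	tmp = RREG32_SOC15(GC, 0, reg_op_cntl);
 *	tmp = REG_SET_FIELD(tmp, OP_CNTL, INVALIDATE_CACHE, 1);
 *	WREG32_SOC15(GC, 0, reg_op_cntl, tmp);
 *	for (i = 0; i < usec_timeout; i++) {
 *		tmp = RREG32_SOC15(GC, 0, reg_op_cntl);
 *		if (REG_GET_FIELD(tmp, OP_CNTL, INVALIDATE_CACHE_COMPLETE))
 *			break;
 *		udelay(1);
 *	}
 *	return (i < usec_timeout) ? 0 : -EINVAL;
 */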
static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		amdgpu_device_flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/* Program pfp ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);

	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		amdgpu_device_flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	/* Program mec1 ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}
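/*
 * RS64 cache config: the CP firmware is split into an instruction
 * image and a data image, each mapped through its own base registers.
 * The entry point written to *_PRGRM_CNTR_START{,_HI} is the 64-bit
 * byte address of ucode_start converted to a dword address: the low
 * register takes (hi << 30) | (lo >> 2) and the high register takes
 * hi >> 2.
 */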
static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	unsigned i, pipe_id;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		lower_32_bits(addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		upper_32_bits(addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the PFP L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
	/* Wait for the cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			(pfp_hdr->ucode_start_addr_hi << 30) |
			(pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			pfp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_PFP_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear pfp pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			lower_32_bits(addr2));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			upper_32_bits(addr2));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}
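/*
 * The ME variant below mirrors the PFP setup above, but targets the
 * CP_ME_IC_BASE registers and the second RS64 data-cache base
 * (DC_BASE1 instead of DC_BASE0), toggling the ME_PIPEx_RESET bits
 * instead.
 */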
static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	unsigned i, pipe_id;
	const struct gfx_firmware_header_v2_0 *me_hdr;

	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		lower_32_bits(addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		upper_32_bits(addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for the instruction cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			(me_hdr->ucode_start_addr_hi << 30) |
			(me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			me_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_ME_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear me pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			lower_32_bits(addr2));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			upper_32_bits(addr2));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}
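/*
 * MEC variant: select each MEC1 pipe through GRBM (me = 1), program
 * the data (MDBASE) and instruction (CPC_IC_BASE) bases per pipe,
 * then invalidate the data cache followed by the instruction cache.
 */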
static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	unsigned i;
	const struct gfx_firmware_header_v2_0 *mec_hdr;

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(addr2));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(addr));
	}
	mutex_unlock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* Trigger an invalidation of the L1 data cache */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	uint32_t pipe_id, tmp;

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	/* config pfp program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* reset pfp pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear pfp pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config me program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* reset me pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear me pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config mec program start addr */
	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
		soc21_grbm_select(adev, 1, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* reset mec pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);

	/* clear mec pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}
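/*
 * Once the IMU has bootstrapped the RLC, completion is reported via
 * RLC_RLCS_BOOTLOAD_STATUS; the integrated (APU) GC variants expose
 * this status at a different register offset, hence the _gc_11_0_1
 * flavour checked below.
 */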
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i, r;
	uint64_t addr, addr2;

	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);

		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
			    IP_VERSION(11, 0, 1) ||
		    amdgpu_ip_version(adev, GC_HWIP, 0) ==
			    IP_VERSION(11, 0, 4) ||
		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
			bootload_status = RREG32_SOC15(GC, 0,
					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
		else
			bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);

		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		if (adev->gfx.rs64_enable) {
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
			r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
			r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
			r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
			if (r)
				return r;
		} else {
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
			r = gfx_v11_0_config_me_cache(adev, addr);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
			r = gfx_v11_0_config_pfp_cache(adev, addr);
			if (r)
				return r;
			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
			r = gfx_v11_0_config_mec_cache(adev, addr);
			if (r)
				return r;
		}
	}

	return 0;
}
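/*
 * cp_gfx_enable toggles the ME/PFP HALT bits and then polls CP_STAT
 * until the CP reports idle, warning on timeout.
 */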
static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");

	return 0;
}

static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);

	gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);

	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);

	for (i = 0; i < pfp_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
			     le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	return 0;
}
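/*
 * RS64 PFP loader: unlike the v1 path above, the RS64 firmware ships
 * separate instruction and data images, each staged in its own
 * 64 KiB-aligned BO (VRAM preferred, with a GTT fallback) before the
 * caches are primed.
 */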
static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_data_obj,
				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
		gfx_v11_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		amdgpu_device_flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the PFP L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
	/* Wait for the cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			(pfp_hdr->ucode_start_addr_hi << 30) |
			(pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			pfp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset given PIPE to take
		 * effect of CP_PFP_PRGRM_CNTR_START.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear pfp pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}
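/*
 * Legacy (v1) ME loader: as with the v1 PFP path, the jump table is
 * streamed through the CP_HYP_ME_UCODE_ADDR/DATA pair after the main
 * image has been mapped through the ME instruction cache.
 */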
static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);

	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);

	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);

	for (i = 0; i < me_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);

	return 0;
}
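/*
 * RS64 ME loader: mirrors the RS64 PFP loader above, with the data
 * image mapped through DC_BASE1 and the ME pipe reset bits used to
 * latch the new program counter start.
 */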
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);

	/* 64 KB alignment */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_data_obj,
				      &adev->gfx.me.me_fw_data_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
		gfx_v11_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		amdgpu_device_flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces an invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for the instruction cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       ICACHE_PRIMED))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

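	/*
	 * Program the start program counter for each ME pipe. The
	 * ucode_start_addr from the header is a byte address; the hardware
	 * takes a dword address split across two registers, so the low
	 * register gets the low 32 bits of (addr >> 2), i.e.
	 * (hi << 30) | (lo >> 2), and the _HI register gets hi >> 2.
	 */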
	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given pipe so that
		 * CP_ME_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		/* Clear me pipe reset bit. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			     lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			     upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
		return -EINVAL;

	gfx_v11_0_cp_gfx_enable(adev, false);

	if (adev->gfx.rs64_enable)
		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
	else
		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	if (adev->gfx.rs64_enable)
		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
	else
		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;
	int ctx_reg_offset;

	/* init the CP */
	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);

	if (!amdgpu_async_gfx_ring)
		gfx_v11_0_cp_gfx_enable(adev, true);

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section;
ext->extent != NULL; ++ext) { 3551 if (sect->id == SECT_CONTEXT) { 3552 amdgpu_ring_write(ring, 3553 PACKET3(PACKET3_SET_CONTEXT_REG, 3554 ext->reg_count)); 3555 amdgpu_ring_write(ring, ext->reg_index - 3556 PACKET3_SET_CONTEXT_REG_START); 3557 for (i = 0; i < ext->reg_count; i++) 3558 amdgpu_ring_write(ring, ext->extent[i]); 3559 } 3560 } 3561 } 3562 3563 ctx_reg_offset = 3564 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 3565 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 3566 amdgpu_ring_write(ring, ctx_reg_offset); 3567 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 3568 3569 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3570 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 3571 3572 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3573 amdgpu_ring_write(ring, 0); 3574 3575 amdgpu_ring_commit(ring); 3576 3577 /* submit cs packet to copy state 0 to next available state */ 3578 if (adev->gfx.num_gfx_rings > 1) { 3579 /* maximum supported gfx ring is 2 */ 3580 ring = &adev->gfx.gfx_ring[1]; 3581 r = amdgpu_ring_alloc(ring, 2); 3582 if (r) { 3583 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3584 return r; 3585 } 3586 3587 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3588 amdgpu_ring_write(ring, 0); 3589 3590 amdgpu_ring_commit(ring); 3591 } 3592 return 0; 3593 } 3594 3595 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 3596 CP_PIPE_ID pipe) 3597 { 3598 u32 tmp; 3599 3600 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 3601 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 3602 3603 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 3604 } 3605 3606 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 3607 struct amdgpu_ring *ring) 3608 { 3609 u32 tmp; 3610 3611 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3612 if (ring->use_doorbell) { 3613 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3614 DOORBELL_OFFSET, ring->doorbell_index); 3615 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3616 DOORBELL_EN, 1); 3617 } else { 3618 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3619 DOORBELL_EN, 0); 3620 } 3621 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 3622 3623 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 3624 DOORBELL_RANGE_LOWER, ring->doorbell_index); 3625 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 3626 3627 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3628 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 3629 } 3630 3631 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) 3632 { 3633 struct amdgpu_ring *ring; 3634 u32 tmp; 3635 u32 rb_bufsz; 3636 u64 rb_addr, rptr_addr, wptr_gpu_addr; 3637 3638 /* Set the write pointer delay */ 3639 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 3640 3641 /* set the RB to use vmid 0 */ 3642 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 3643 3644 /* Init gfx ring 0 for pipe 0 */ 3645 mutex_lock(&adev->srbm_mutex); 3646 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3647 3648 /* Set ring buffer size */ 3649 ring = &adev->gfx.gfx_ring[0]; 3650 rb_bufsz = order_base_2(ring->ring_size / 8); 3651 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 3652 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 3653 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3654 3655 /* Initialize the ring buffer's write pointers */ 3656 ring->wptr = 0; 3657 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3658 WREG32_SOC15(GC, 0, 
regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3659 3660 /* set the wb address whether it's enabled or not */ 3661 rptr_addr = ring->rptr_gpu_addr; 3662 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 3663 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3664 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3665 3666 wptr_gpu_addr = ring->wptr_gpu_addr; 3667 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3668 lower_32_bits(wptr_gpu_addr)); 3669 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3670 upper_32_bits(wptr_gpu_addr)); 3671 3672 mdelay(1); 3673 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3674 3675 rb_addr = ring->gpu_addr >> 8; 3676 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 3677 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 3678 3679 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 3680 3681 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3682 mutex_unlock(&adev->srbm_mutex); 3683 3684 /* Init gfx ring 1 for pipe 1 */ 3685 if (adev->gfx.num_gfx_rings > 1) { 3686 mutex_lock(&adev->srbm_mutex); 3687 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1); 3688 /* maximum supported gfx ring is 2 */ 3689 ring = &adev->gfx.gfx_ring[1]; 3690 rb_bufsz = order_base_2(ring->ring_size / 8); 3691 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); 3692 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); 3693 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3694 /* Initialize the ring buffer's write pointers */ 3695 ring->wptr = 0; 3696 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); 3697 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); 3698 /* Set the wb address whether it's enabled or not */ 3699 rptr_addr = ring->rptr_gpu_addr; 3700 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); 3701 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3702 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3703 wptr_gpu_addr = ring->wptr_gpu_addr; 3704 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3705 lower_32_bits(wptr_gpu_addr)); 3706 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3707 upper_32_bits(wptr_gpu_addr)); 3708 3709 mdelay(1); 3710 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3711 3712 rb_addr = ring->gpu_addr >> 8; 3713 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr); 3714 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr)); 3715 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1); 3716 3717 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3718 mutex_unlock(&adev->srbm_mutex); 3719 } 3720 /* Switch to pipe 0 */ 3721 mutex_lock(&adev->srbm_mutex); 3722 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3723 mutex_unlock(&adev->srbm_mutex); 3724 3725 /* start the ring */ 3726 gfx_v11_0_cp_gfx_start(adev); 3727 3728 return 0; 3729 } 3730 3731 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 3732 { 3733 u32 data; 3734 3735 if (adev->gfx.rs64_enable) { 3736 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 3737 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 3738 enable ? 0 : 1); 3739 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 3740 enable ? 0 : 1); 3741 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 3742 enable ? 0 : 1); 3743 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 3744 enable ? 0 : 1); 3745 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 3746 enable ? 0 : 1); 3747 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 3748 enable ? 
1 : 0); 3749 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 3750 enable ? 1 : 0); 3751 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 3752 enable ? 1 : 0); 3753 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 3754 enable ? 1 : 0); 3755 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 3756 enable ? 0 : 1); 3757 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 3758 } else { 3759 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); 3760 3761 if (enable) { 3762 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); 3763 if (!adev->enable_mes_kiq) 3764 data = REG_SET_FIELD(data, CP_MEC_CNTL, 3765 MEC_ME2_HALT, 0); 3766 } else { 3767 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); 3768 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); 3769 } 3770 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); 3771 } 3772 3773 udelay(50); 3774 } 3775 3776 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) 3777 { 3778 const struct gfx_firmware_header_v1_0 *mec_hdr; 3779 const __le32 *fw_data; 3780 unsigned i, fw_size; 3781 u32 *fw = NULL; 3782 int r; 3783 3784 if (!adev->gfx.mec_fw) 3785 return -EINVAL; 3786 3787 gfx_v11_0_cp_compute_enable(adev, false); 3788 3789 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3790 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3791 3792 fw_data = (const __le32 *) 3793 (adev->gfx.mec_fw->data + 3794 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 3795 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 3796 3797 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 3798 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3799 &adev->gfx.mec.mec_fw_obj, 3800 &adev->gfx.mec.mec_fw_gpu_addr, 3801 (void **)&fw); 3802 if (r) { 3803 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); 3804 gfx_v11_0_mec_fini(adev); 3805 return r; 3806 } 3807 3808 memcpy(fw, fw_data, fw_size); 3809 3810 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3811 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3812 3813 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); 3814 3815 /* MEC1 */ 3816 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); 3817 3818 for (i = 0; i < mec_hdr->jt_size; i++) 3819 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, 3820 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 3821 3822 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 3823 3824 return 0; 3825 } 3826 3827 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 3828 { 3829 const struct gfx_firmware_header_v2_0 *mec_hdr; 3830 const __le32 *fw_ucode, *fw_data; 3831 u32 tmp, fw_ucode_size, fw_data_size; 3832 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 3833 u32 *fw_ucode_ptr, *fw_data_ptr; 3834 int r; 3835 3836 if (!adev->gfx.mec_fw) 3837 return -EINVAL; 3838 3839 gfx_v11_0_cp_compute_enable(adev, false); 3840 3841 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 3842 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3843 3844 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 3845 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 3846 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 3847 3848 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 3849 le32_to_cpu(mec_hdr->data_offset_bytes)); 3850 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 3851 3852 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3853 64 * 1024, 3854 AMDGPU_GEM_DOMAIN_VRAM | 3855 AMDGPU_GEM_DOMAIN_GTT, 3856 &adev->gfx.mec.mec_fw_obj, 3857 
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
	memcpy(fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Trigger an invalidation of the MEC L1 data cache */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction cache */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}

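/*
 * Tell the RLC which queue is the KIQ: RLC_CP_SCHEDULERS takes the queue
 * identity ((me << 5) | (pipe << 3) | queue) in its low byte; bit 7 appears
 * to act as the enable/valid bit for the entry.
 */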
static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which queue is the KIQ */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}

static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
{
	/* set graphics engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);

	/* set compute engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.kiq * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.userqueue_end * 2) << 2);
}

static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
					   struct v11_gfx_mqd *mqd,
					   struct amdgpu_mqd_prop *prop)
{
	bool priority = false;
	u32 tmp;

	/* set up default queue priority level
	 * 0x0 = low priority, 0x1 = high priority
	 */
	if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
		priority = true;

	tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
	mqd->cp_gfx_hqd_queue_priority = tmp;
}

static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
				  struct amdgpu_mqd_prop *prop)
{
	struct v11_gfx_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set up mqd control */
	tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
	tmp = regCP_GFX_HQD_VMID_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = tmp;

	/* set up gfx queue priority */
	gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);

	/* set up time quantum */
	tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base, similar to CP_RB_BASE */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
	tmp = regCP_GFX_HQD_CNTL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}

static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.gfx_ring[0];

	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else {
		/* restore mqd with the backup copy */
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		*ring->wptr_cpu_addr = 0;
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
		if (r)
			return r;
	}

	r = amdgpu_gfx_enable_kgq(adev, 0);
	if (r)
		return r;

	return gfx_v11_0_cp_gfx_start(adev);
}

static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
				      struct amdgpu_mqd_prop *prop)
{
	struct v11_compute_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = prop->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;

	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = regCP_MQD_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(prop->queue_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
			    prop->allow_tunneling);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (prop->use_doorbell) {
		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a compute queue/ring */
	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;

	mqd->cp_hqd_active = prop->hqd_active;

	return 0;
}

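/*
 * Commit the KIQ's MQD contents to the live CP_HQD_* registers by direct
 * MMIO. Regular compute queues are mapped by the KIQ (or MES) instead;
 * the KIQ itself has to be brought up by the driver, since no scheduler
 * queue exists yet to do it on the driver's behalf.
 */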
static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	int j;

	/* deactivate the queue */
	if (amdgpu_sriov_vf(adev))
		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);

	/* disable wptr polling */
	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
		     mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
		     mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
		     mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
			     mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
			     mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
			     mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
			     mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
		     mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
		     mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
		     mqd->cp_mqd_control);

	/* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
		     mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
		     mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
		     mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
mqd->cp_hqd_pq_wptr_poll_addr_lo); 4336 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 4337 mqd->cp_hqd_pq_wptr_poll_addr_hi); 4338 4339 /* enable the doorbell if requested */ 4340 if (ring->use_doorbell) { 4341 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4342 (adev->doorbell_index.kiq * 2) << 2); 4343 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4344 (adev->doorbell_index.userqueue_end * 2) << 2); 4345 } 4346 4347 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4348 mqd->cp_hqd_pq_doorbell_control); 4349 4350 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4351 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4352 mqd->cp_hqd_pq_wptr_lo); 4353 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4354 mqd->cp_hqd_pq_wptr_hi); 4355 4356 /* set the vmid for the queue */ 4357 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 4358 4359 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 4360 mqd->cp_hqd_persistent_state); 4361 4362 /* activate the queue */ 4363 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 4364 mqd->cp_hqd_active); 4365 4366 if (ring->use_doorbell) 4367 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 4368 4369 return 0; 4370 } 4371 4372 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) 4373 { 4374 struct amdgpu_device *adev = ring->adev; 4375 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4376 4377 gfx_v11_0_kiq_setting(ring); 4378 4379 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4380 /* reset MQD to a clean status */ 4381 if (adev->gfx.kiq[0].mqd_backup) 4382 memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 4383 4384 /* reset ring buffer */ 4385 ring->wptr = 0; 4386 amdgpu_ring_clear_ring(ring); 4387 4388 mutex_lock(&adev->srbm_mutex); 4389 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4390 gfx_v11_0_kiq_init_register(ring); 4391 soc21_grbm_select(adev, 0, 0, 0, 0); 4392 mutex_unlock(&adev->srbm_mutex); 4393 } else { 4394 memset((void *)mqd, 0, sizeof(*mqd)); 4395 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 4396 amdgpu_ring_clear_ring(ring); 4397 mutex_lock(&adev->srbm_mutex); 4398 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4399 amdgpu_ring_init_mqd(ring); 4400 gfx_v11_0_kiq_init_register(ring); 4401 soc21_grbm_select(adev, 0, 0, 0, 0); 4402 mutex_unlock(&adev->srbm_mutex); 4403 4404 if (adev->gfx.kiq[0].mqd_backup) 4405 memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 4406 } 4407 4408 return 0; 4409 } 4410 4411 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) 4412 { 4413 struct amdgpu_device *adev = ring->adev; 4414 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4415 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 4416 4417 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 4418 memset((void *)mqd, 0, sizeof(*mqd)); 4419 mutex_lock(&adev->srbm_mutex); 4420 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4421 amdgpu_ring_init_mqd(ring); 4422 soc21_grbm_select(adev, 0, 0, 0, 0); 4423 mutex_unlock(&adev->srbm_mutex); 4424 4425 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4426 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4427 } else { 4428 /* restore MQD to a clean status */ 4429 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4430 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4431 /* reset ring buffer */ 4432 ring->wptr = 0; 4433 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 4434 amdgpu_ring_clear_ring(ring); 4435 } 4436 4437 return 0; 4438 } 4439 4440 static int 
gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
{
	gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
	return 0;
}

static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
{
	int i, r;

	if (!amdgpu_async_gfx_ring)
		gfx_v11_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
		if (r)
			return r;
	}

	return amdgpu_gfx_enable_kcq(adev, 0);
}

static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v11_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v11_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		if (adev->gfx.rs64_enable)
			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
		else
			r = gfx_v11_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	gfx_v11_0_cp_set_doorbell_range(adev);

	if (amdgpu_async_gfx_ring) {
		gfx_v11_0_cp_compute_enable(adev, true);
		gfx_v11_0_cp_gfx_enable(adev, true);
	}

	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
		r = amdgpu_mes_kiq_hw_init(adev);
	else
		r = gfx_v11_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v11_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v11_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v11_0_cp_gfx_enable(adev, enable);
	gfx_v11_0_cp_compute_enable(adev, enable);
}

static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	amdgpu_device_flush_hdp(adev, NULL);

	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	/* TODO: investigate why this and the hdp flush above are needed;
	 * are we missing a flush somewhere else?
*/ 4552 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); 4553 4554 return 0; 4555 } 4556 4557 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) 4558 { 4559 u32 tmp; 4560 4561 /* select RS64 */ 4562 if (adev->gfx.rs64_enable) { 4563 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); 4564 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); 4565 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); 4566 4567 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); 4568 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); 4569 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); 4570 } 4571 4572 if (amdgpu_emu_mode == 1) 4573 msleep(100); 4574 } 4575 4576 static int get_gb_addr_config(struct amdgpu_device * adev) 4577 { 4578 u32 gb_addr_config; 4579 4580 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 4581 if (gb_addr_config == 0) 4582 return -EINVAL; 4583 4584 adev->gfx.config.gb_addr_config_fields.num_pkrs = 4585 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 4586 4587 adev->gfx.config.gb_addr_config = gb_addr_config; 4588 4589 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 4590 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4591 GB_ADDR_CONFIG, NUM_PIPES); 4592 4593 adev->gfx.config.max_tile_pipes = 4594 adev->gfx.config.gb_addr_config_fields.num_pipes; 4595 4596 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 4597 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4598 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 4599 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 4600 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4601 GB_ADDR_CONFIG, NUM_RB_PER_SE); 4602 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 4603 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4604 GB_ADDR_CONFIG, NUM_SHADER_ENGINES); 4605 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 4606 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4607 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 4608 4609 return 0; 4610 } 4611 4612 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) 4613 { 4614 uint32_t data; 4615 4616 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 4617 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 4618 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 4619 4620 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 4621 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 4622 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 4623 } 4624 4625 static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block) 4626 { 4627 int r; 4628 struct amdgpu_device *adev = ip_block->adev; 4629 4630 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, 4631 adev->gfx.cleaner_shader_ptr); 4632 4633 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 4634 if (adev->gfx.imu.funcs) { 4635 /* RLC autoload sequence 1: Program rlc ram */ 4636 if (adev->gfx.imu.funcs->program_rlc_ram) 4637 adev->gfx.imu.funcs->program_rlc_ram(adev); 4638 /* rlc autoload firmware */ 4639 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); 4640 if (r) 4641 return r; 4642 } 4643 } else { 4644 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4645 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 4646 if (adev->gfx.imu.funcs->load_microcode) 4647 adev->gfx.imu.funcs->load_microcode(adev); 4648 if (adev->gfx.imu.funcs->setup_imu) 4649 adev->gfx.imu.funcs->setup_imu(adev); 4650 if (adev->gfx.imu.funcs->start_imu) 4651 adev->gfx.imu.funcs->start_imu(adev); 4652 } 4653 4654 /* disable gpa mode in backdoor loading */ 4655 gfx_v11_0_disable_gpa_mode(adev); 4656 } 4657 } 4658 4659 
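	/*
	 * For PSP and RLC-backdoor-autoload configurations the firmware
	 * images are loaded by the PSP or RLC rather than by the driver
	 * itself, so wait for the RLC to report that bootloading has
	 * finished before programming any CP state below.
	 */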
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
		if (r) {
			dev_err(adev->dev, "(%d) failed to wait for rlc autoload completion\n", r);
			return r;
		}
	}

	adev->gfx.is_poweron = true;

	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
	    adev->gfx.rs64_enable)
		gfx_v11_0_config_gfx_rs64(adev);

	r = gfx_v11_0_gfxhub_enable(adev);
	if (r)
		return r;

	if (!amdgpu_emu_mode)
		gfx_v11_0_init_golden_registers(adev);

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		/*
		 * For gfx 11, RLC firmware loading depends on the SMU
		 * firmware having been loaded first, so for direct loading
		 * the SMC ucode must be loaded here before the RLC.
		 */
		r = amdgpu_pm_load_smu_firmware(adev, NULL);
		if (r)
			return r;
	}

	gfx_v11_0_constants_init(adev);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		gfx_v11_0_select_cp_fw_arch(adev);

	if (adev->nbio.funcs->gc_doorbell_init)
		adev->nbio.funcs->gc_doorbell_init(adev);

	r = gfx_v11_0_rlc_resume(adev);
	if (r)
		return r;

	/*
	 * Golden register init and rlc resume may override some registers,
	 * so reconfigure them here.
	 */
	gfx_v11_0_tcp_harvest(adev);

	r = gfx_v11_0_cp_resume(adev);
	if (r)
		return r;

	/* get IMU version from HW if it's not set */
	if (!adev->gfx.imu_fw_version)
		adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);

	return r;
}

static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->gfx.idle_work);

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	if (!adev->no_hw_access) {
		if (amdgpu_async_gfx_ring) {
			if (amdgpu_gfx_disable_kgq(adev, 0))
				DRM_ERROR("KGQ disable failed\n");
		}

		if (amdgpu_gfx_disable_kcq(adev, 0))
			DRM_ERROR("KCQ disable failed\n");

		amdgpu_mes_kiq_hw_fini(adev);
	}

	if (amdgpu_sriov_vf(adev))
		/* Remove the steps disabling CPG and clearing KIQ position,
		 * so that CP could perform IDLE-SAVE during switch. Those
		 * steps are necessary to avoid a DMAR error on gfx9, but the
		 * error has not been reproduced on gfx11.
		 */
		return 0;

	gfx_v11_0_cp_enable(adev, false);
	gfx_v11_0_enable_gui_idle_interrupt(adev, false);

	adev->gfxhub.funcs->gart_disable(adev);

	adev->gfx.is_poweron = false;

	return 0;
}

static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return gfx_v11_0_hw_fini(ip_block);
}

static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	return gfx_v11_0_hw_init(ip_block);
}

static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return !REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
			      GRBM_STATUS, GUI_ACTIVE);
}

static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

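/*
 * Acquire (req == true) or release (req == false) the CP_GFX_INDEX_MUTEX
 * hardware mutex that arbitrates indexed-register access between the driver
 * and CP firmware. On acquire, reading back exactly what was written means
 * we own the mutex; on release, a readback that no longer matches the
 * request pattern means the mutex is unlocked again or held by firmware.
 */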
int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
				      bool req)
{
	u32 i, tmp, val;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* Request with MeId=2, PipeId=0 */
		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);

		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
		if (req) {
			if (val == tmp)
				break;
		} else {
			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
					    REQUEST, 1);

			/* unlocked or locked by firmware */
			if (val != tmp)
				break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		return -EINVAL;

	return 0;
}

static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	int r, i, j, k;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				soc21_grbm_select(adev, i, k, j, 0);

				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
			}
		}
	}
	for (i = 0; i < adev->gfx.me.num_me; ++i) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				soc21_grbm_select(adev, i, k, j, 0);

				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
			}
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
	mutex_lock(&adev->gfx.reset_sem_mutex);
	r = gfx_v11_0_request_gfx_index_mutex(adev, true);
	if (r) {
		mutex_unlock(&adev->gfx.reset_sem_mutex);
		DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
		return r;
	}

	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);

	/* Read the CP_VMID_RESET register three times to give
	 * GFX_HQD_ACTIVE sufficient time to reach 0.
	 */
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);

	/* release the gfx mutex */
	r = gfx_v11_0_request_gfx_index_mutex(adev, false);
	mutex_unlock(&adev->gfx.reset_sem_mutex);
	if (r) {
		DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
		return r;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "failed to wait for all pipes to become idle\n");
		return -EINVAL;
	}

	/********** trigger soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 1);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
	/********** exit soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 0);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);

	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);

	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "failed to wait for CP_VMID_RESET to clear\n");
		return -EINVAL;
	}

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return gfx_v11_0_cp_resume(adev);
}

static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	int i, r;
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	long tmo = msecs_to_jiffies(1000);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	return false;
}

static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * A GFX soft reset also impacts MES, so MES must be resumed
	 * after every GFX soft reset.
	 */
	return amdgpu_mes_resume(adev);
}

static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;
	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_gfx_off_ctrl(adev, false);
		mutex_lock(&adev->gfx.gpu_clock_mutex);
		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
		if (clock_counter_hi_pre != clock_counter_hi_after)
			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
		mutex_unlock(&adev->gfx.gpu_clock_mutex);
		amdgpu_gfx_off_ctrl(adev, true);
	} else {
		preempt_disable();
		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
		if (clock_counter_hi_pre != clock_counter_hi_after)
			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
		preempt_enable();
	}
	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);

	return clock;
}

static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					   uint32_t vmid,
					   uint32_t gds_base, uint32_t gds_size,
					   uint32_t gws_base, uint32_t gws_size,
					   uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
				    gds_base);

	/* GDS Size */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
				    gds_size);

	/* GWS */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
				    (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;

	adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);

	gfx_v11_0_set_kiq_pm4_funcs(adev);
	gfx_v11_0_set_ring_funcs(adev);
	gfx_v11_0_set_irq_funcs(adev);
	gfx_v11_0_set_gds_init(adev);
	gfx_v11_0_set_rlc_funcs(adev);
gfx_v11_0_set_mqd_funcs(adev); 5074 gfx_v11_0_set_imu_funcs(adev); 5075 5076 gfx_v11_0_init_rlcg_reg_access_ctrl(adev); 5077 5078 return gfx_v11_0_init_microcode(adev); 5079 } 5080 5081 static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) 5082 { 5083 struct amdgpu_device *adev = ip_block->adev; 5084 int r; 5085 5086 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 5087 if (r) 5088 return r; 5089 5090 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 5091 if (r) 5092 return r; 5093 5094 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 5095 if (r) 5096 return r; 5097 return 0; 5098 } 5099 5100 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 5101 { 5102 uint32_t rlc_cntl; 5103 5104 /* if RLC is not enabled, do nothing */ 5105 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 5106 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 5107 } 5108 5109 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 5110 { 5111 uint32_t data; 5112 unsigned i; 5113 5114 data = RLC_SAFE_MODE__CMD_MASK; 5115 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 5116 5117 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 5118 5119 /* wait for RLC_SAFE_MODE */ 5120 for (i = 0; i < adev->usec_timeout; i++) { 5121 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 5122 RLC_SAFE_MODE, CMD)) 5123 break; 5124 udelay(1); 5125 } 5126 } 5127 5128 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) 5129 { 5130 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 5131 } 5132 5133 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 5134 bool enable) 5135 { 5136 uint32_t def, data; 5137 5138 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 5139 return; 5140 5141 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5142 5143 if (enable) 5144 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5145 else 5146 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5147 5148 if (def != data) 5149 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5150 } 5151 5152 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 5153 bool enable) 5154 { 5155 uint32_t def, data; 5156 5157 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 5158 return; 5159 5160 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5161 5162 if (enable) 5163 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5164 else 5165 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5166 5167 if (def != data) 5168 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5169 } 5170 5171 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 5172 bool enable) 5173 { 5174 uint32_t def, data; 5175 5176 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 5177 return; 5178 5179 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5180 5181 if (enable) 5182 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5183 else 5184 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5185 5186 if (def != data) 5187 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5188 } 5189 5190 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 5191 bool enable) 5192 { 5193 uint32_t data, def; 5194 5195 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 5196 return; 5197 5198 /* It is disabled by HW by default */ 5199 if (enable) { 5200 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5201 /* 1 - 
RLC_CGTT_MGCG_OVERRIDE */ 5202 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5203 5204 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5205 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5206 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5207 5208 if (def != data) 5209 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5210 } 5211 } else { 5212 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5213 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5214 5215 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5216 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5217 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5218 5219 if (def != data) 5220 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5221 } 5222 } 5223 } 5224 5225 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 5226 bool enable) 5227 { 5228 uint32_t def, data; 5229 5230 if (!(adev->cg_flags & 5231 (AMD_CG_SUPPORT_GFX_CGCG | 5232 AMD_CG_SUPPORT_GFX_CGLS | 5233 AMD_CG_SUPPORT_GFX_3D_CGCG | 5234 AMD_CG_SUPPORT_GFX_3D_CGLS))) 5235 return; 5236 5237 if (enable) { 5238 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5239 5240 /* unset CGCG override */ 5241 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5242 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 5243 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5244 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 5245 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 5246 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5247 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 5248 5249 /* update CGCG override bits */ 5250 if (def != data) 5251 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5252 5253 /* enable cgcg FSM(0x0000363F) */ 5254 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5255 5256 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 5257 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 5258 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5259 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5260 } 5261 5262 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 5263 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 5264 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5265 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5266 } 5267 5268 if (def != data) 5269 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5270 5271 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5272 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5273 5274 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 5275 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 5276 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5277 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5278 } 5279 5280 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 5281 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 5282 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5283 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5284 } 5285 5286 if (def != data) 5287 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5288 5289 /* set IDLE_POLL_COUNT(0x00900100) */ 5290 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 5291 5292 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 5293 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 5294 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 5295 5296 if (def != data) 5297 
WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);

		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs only have one SDMA instance, so there is no
		 * need to configure SDMA1.
		 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	} else {
		/* Program RLC_CGCG_CGLS_CTRL */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);

		/* Program RLC_CGCG_CGLS_CTRL_3D */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs only have one SDMA instance, so there is no
		 * need to configure SDMA1.
		 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	}
}

static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);

	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);

	gfx_v11_0_update_repeater_fgcg(adev, enable);

	gfx_v11_0_update_sram_fgcg(adev, enable);

	gfx_v11_0_update_perf_clk(adev, enable);

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_CGLS |
	     AMD_CG_SUPPORT_GFX_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGLS))
		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return 0;
}

static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
	u32 reg, pre_data, data;

	amdgpu_gfx_off_ctrl(adev, false);
	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
		pre_data = RREG32_NO_KIQ(reg);
	else
		pre_data = RREG32(reg);

	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK)
		<< RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
			WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
		else
			WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
	}
	amdgpu_gfx_off_ctrl(adev, true);

	if (ring
	    && amdgpu_sriov_is_pp_one_vf(adev)
	    && (pre_data != data)
	    && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
		|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
		amdgpu_ring_emit_wreg(ring, reg, data);
	}
}

static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
	.set_safe_mode = gfx_v11_0_set_safe_mode,
	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
	.init = gfx_v11_0_rlc_init,
	.get_csb_size = gfx_v11_0_get_csb_size,
	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
	.resume = gfx_v11_0_rlc_resume,
	.stop = gfx_v11_0_rlc_stop,
	.reset = gfx_v11_0_rlc_reset,
	.start = gfx_v11_0_rlc_start,
	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};

static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
{
	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;

	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);

	/* Program RLC_PG_DELAY_3 for CGPG hysteresis */
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 4):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 5, 1):
		case IP_VERSION(11, 5, 2):
		case IP_VERSION(11, 5, 3):
			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
			break;
		default:
			break;
		}
	}
}

static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v11_cntl_power_gating(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}

static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		if (!enable)
			amdgpu_gfx_off_ctrl(adev, false);

		gfx_v11_cntl_pg(adev, enable);

		if (enable)
			amdgpu_gfx_off_ctrl(adev, true);

		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
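	/* All GC 11.x variants share the same clockgating path. */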
5512 case IP_VERSION(11, 0, 3): 5513 case IP_VERSION(11, 0, 4): 5514 case IP_VERSION(11, 5, 0): 5515 case IP_VERSION(11, 5, 1): 5516 case IP_VERSION(11, 5, 2): 5517 case IP_VERSION(11, 5, 3): 5518 gfx_v11_0_update_gfx_clock_gating(adev, 5519 state == AMD_CG_STATE_GATE); 5520 break; 5521 default: 5522 break; 5523 } 5524 5525 return 0; 5526 } 5527 5528 static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) 5529 { 5530 struct amdgpu_device *adev = ip_block->adev; 5531 int data; 5532 5533 /* AMD_CG_SUPPORT_GFX_MGCG */ 5534 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5535 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5536 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5537 5538 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5539 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5540 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5541 5542 /* AMD_CG_SUPPORT_GFX_FGCG */ 5543 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5544 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5545 5546 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5547 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5548 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5549 5550 /* AMD_CG_SUPPORT_GFX_CGCG */ 5551 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5552 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5553 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5554 5555 /* AMD_CG_SUPPORT_GFX_CGLS */ 5556 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5557 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5558 5559 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5560 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5561 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5562 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5563 5564 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5565 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5566 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 5567 } 5568 5569 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 5570 { 5571 /* gfx11 is 32bit rptr*/ 5572 return *(uint32_t *)ring->rptr_cpu_addr; 5573 } 5574 5575 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 5576 { 5577 struct amdgpu_device *adev = ring->adev; 5578 u64 wptr; 5579 5580 /* XXX check if swapping is necessary on BE */ 5581 if (ring->use_doorbell) { 5582 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5583 } else { 5584 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 5585 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 5586 } 5587 5588 return wptr; 5589 } 5590 5591 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 5592 { 5593 struct amdgpu_device *adev = ring->adev; 5594 5595 if (ring->use_doorbell) { 5596 /* XXX check if swapping is necessary on BE */ 5597 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5598 ring->wptr); 5599 WDOORBELL64(ring->doorbell_index, ring->wptr); 5600 } else { 5601 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 5602 lower_32_bits(ring->wptr)); 5603 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 5604 upper_32_bits(ring->wptr)); 5605 } 5606 } 5607 5608 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 5609 { 5610 /* gfx11 hardware is 32bit rptr */ 5611 return *(uint32_t *)ring->rptr_cpu_addr; 5612 } 5613 5614 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 5615 { 5616 u64 wptr; 5617 5618 /* XXX check if swapping is necessary on BE */ 5619 if (ring->use_doorbell) 5620 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5621 else 5622 BUG(); 5623 return wptr; 5624 } 5625 5626 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 
5627 { 5628 struct amdgpu_device *adev = ring->adev; 5629 5630 /* XXX check if swapping is necessary on BE */ 5631 if (ring->use_doorbell) { 5632 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5633 ring->wptr); 5634 WDOORBELL64(ring->doorbell_index, ring->wptr); 5635 } else { 5636 BUG(); /* only DOORBELL method supported on gfx11 now */ 5637 } 5638 } 5639 5640 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 5641 { 5642 struct amdgpu_device *adev = ring->adev; 5643 u32 ref_and_mask, reg_mem_engine; 5644 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 5645 5646 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 5647 switch (ring->me) { 5648 case 1: 5649 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 5650 break; 5651 case 2: 5652 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 5653 break; 5654 default: 5655 return; 5656 } 5657 reg_mem_engine = 0; 5658 } else { 5659 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe; 5660 reg_mem_engine = 1; /* pfp */ 5661 } 5662 5663 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 5664 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 5665 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 5666 ref_and_mask, ref_and_mask, 0x20); 5667 } 5668 5669 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 5670 struct amdgpu_job *job, 5671 struct amdgpu_ib *ib, 5672 uint32_t flags) 5673 { 5674 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5675 u32 header, control = 0; 5676 5677 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 5678 5679 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5680 5681 control |= ib->length_dw | (vmid << 24); 5682 5683 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 5684 control |= INDIRECT_BUFFER_PRE_ENB(1); 5685 5686 if (flags & AMDGPU_IB_PREEMPTED) 5687 control |= INDIRECT_BUFFER_PRE_RESUME(1); 5688 5689 if (vmid) 5690 gfx_v11_0_ring_emit_de_meta(ring, 5691 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); 5692 } 5693 5694 if (ring->is_mes_queue) 5695 /* inherit vmid from mqd */ 5696 control |= 0x400000; 5697 5698 amdgpu_ring_write(ring, header); 5699 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5700 amdgpu_ring_write(ring, 5701 #ifdef __BIG_ENDIAN 5702 (2 << 0) | 5703 #endif 5704 lower_32_bits(ib->gpu_addr)); 5705 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5706 amdgpu_ring_write(ring, control); 5707 } 5708 5709 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 5710 struct amdgpu_job *job, 5711 struct amdgpu_ib *ib, 5712 uint32_t flags) 5713 { 5714 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5715 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 5716 5717 if (ring->is_mes_queue) 5718 /* inherit vmid from mqd */ 5719 control |= 0x40000000; 5720 5721 /* Currently, there is a high possibility to get wave ID mismatch 5722 * between ME and GDS, leading to a hw deadlock, because ME generates 5723 * different wave IDs than the GDS expects. This situation happens 5724 * randomly when at least 5 compute pipes use GDS ordered append. 5725 * The wave IDs generated by ME are also wrong after suspend/resume. 5726 * Those are probably bugs somewhere else in the kernel driver. 5727 * 5728 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 5729 * GDS to 0 for this ring (me/pipe). 
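	 * (The gfx10 code applies the same GDS_COMPUTE_MAX_WAVE_ID
	 * workaround.)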
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

	/*
	 * The address must be Qword aligned for a 64-bit write, and Dword
	 * aligned when only the low 32 bits of data are sent (data high is
	 * discarded).
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, ring->is_mes_queue ?
			  (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
}

static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}

static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	if (ring->is_mes_queue)
		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
	else
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}

	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
	 * changed in any way.
	 */
	ring->set_q_mode_offs = 0;
	ring->set_q_mode_ptr = NULL;
}

static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
						   uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	/* discard following DWs if *cond_exec_gpu_addr==0 */
	amdgpu_ring_write(ring, 0);
	ret = ring->wptr & ring->buf_mask;
	/* patch dummy value later */
	amdgpu_ring_write(ring, 0);

	return ret;
}

static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
					   u64 shadow_va, u64 csa_va,
					   u64 gds_va, bool init_shadow,
					   int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int offs, end;

	if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
		return;

	/*
	 * The logic here isn't easy to understand because we need to keep
	 * state across multiple executions of the function as well as
	 * between the CPU and GPU. The general idea is that the newly
	 * written GPU command has a condition on the previous one and is
	 * only executed if really necessary.
	 */

	/*
	 * The dw in the NOP controls if the next SET_Q_MODE packet should be
	 * executed or not. Reserve 64 bits just to be on the safe side.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
	offs = ring->wptr & ring->buf_mask;

	/*
	 * We start with skipping the prefix SET_Q_MODE and always executing
	 * the postfix SET_Q_MODE packet. This is changed below with a
	 * WRITE_DATA command when the postfix is executed.
	 */
	amdgpu_ring_write(ring, shadow_va ?
1 : 0); 5922 amdgpu_ring_write(ring, 0); 5923 5924 if (ring->set_q_mode_offs) { 5925 uint64_t addr; 5926 5927 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 5928 addr += ring->set_q_mode_offs << 2; 5929 end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr); 5930 } 5931 5932 /* 5933 * When the postfix SET_Q_MODE packet executes we need to make sure that the 5934 * next prefix SET_Q_MODE packet executes as well. 5935 */ 5936 if (!shadow_va) { 5937 uint64_t addr; 5938 5939 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 5940 addr += offs << 2; 5941 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5942 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); 5943 amdgpu_ring_write(ring, lower_32_bits(addr)); 5944 amdgpu_ring_write(ring, upper_32_bits(addr)); 5945 amdgpu_ring_write(ring, 0x1); 5946 } 5947 5948 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7)); 5949 amdgpu_ring_write(ring, lower_32_bits(shadow_va)); 5950 amdgpu_ring_write(ring, upper_32_bits(shadow_va)); 5951 amdgpu_ring_write(ring, lower_32_bits(gds_va)); 5952 amdgpu_ring_write(ring, upper_32_bits(gds_va)); 5953 amdgpu_ring_write(ring, lower_32_bits(csa_va)); 5954 amdgpu_ring_write(ring, upper_32_bits(csa_va)); 5955 amdgpu_ring_write(ring, shadow_va ? 5956 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0); 5957 amdgpu_ring_write(ring, init_shadow ? 5958 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0); 5959 5960 if (ring->set_q_mode_offs) 5961 amdgpu_ring_patch_cond_exec(ring, end); 5962 5963 if (shadow_va) { 5964 uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid; 5965 5966 /* 5967 * If the tokens match try to skip the last postfix SET_Q_MODE 5968 * packet to avoid saving/restoring the state all the time. 5969 */ 5970 if (ring->set_q_mode_ptr && ring->set_q_mode_token == token) 5971 *ring->set_q_mode_ptr = 0; 5972 5973 ring->set_q_mode_token = token; 5974 } else { 5975 ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs]; 5976 } 5977 5978 ring->set_q_mode_offs = offs; 5979 } 5980 5981 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) 5982 { 5983 int i, r = 0; 5984 struct amdgpu_device *adev = ring->adev; 5985 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 5986 struct amdgpu_ring *kiq_ring = &kiq->ring; 5987 unsigned long flags; 5988 5989 if (adev->enable_mes) 5990 return -EINVAL; 5991 5992 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 5993 return -EINVAL; 5994 5995 spin_lock_irqsave(&kiq->ring_lock, flags); 5996 5997 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 5998 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5999 return -ENOMEM; 6000 } 6001 6002 /* assert preemption condition */ 6003 amdgpu_ring_set_preempt_cond_exec(ring, false); 6004 6005 /* assert IB preemption, emit the trailing fence */ 6006 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 6007 ring->trail_fence_gpu_addr, 6008 ++ring->trail_seq); 6009 amdgpu_ring_commit(kiq_ring); 6010 6011 spin_unlock_irqrestore(&kiq->ring_lock, flags); 6012 6013 /* poll the trailing fence */ 6014 for (i = 0; i < adev->usec_timeout; i++) { 6015 if (ring->trail_seq == 6016 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 6017 break; 6018 udelay(1); 6019 } 6020 6021 if (i >= adev->usec_timeout) { 6022 r = -EINVAL; 6023 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 6024 } 6025 6026 /* deassert preemption condition */ 6027 amdgpu_ring_set_preempt_cond_exec(ring, true); 6028 return r; 6029 } 6030 6031 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 6032 { 6033 struct 
amdgpu_device *adev = ring->adev; 6034 struct v10_de_ib_state de_payload = {0}; 6035 uint64_t offset, gds_addr, de_payload_gpu_addr; 6036 void *de_payload_cpu_addr; 6037 int cnt; 6038 6039 if (ring->is_mes_queue) { 6040 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 6041 gfx[0].gfx_meta_data) + 6042 offsetof(struct v10_gfx_meta_data, de_payload); 6043 de_payload_gpu_addr = 6044 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 6045 de_payload_cpu_addr = 6046 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 6047 6048 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 6049 gfx[0].gds_backup) + 6050 offsetof(struct v10_gfx_meta_data, de_payload); 6051 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 6052 } else { 6053 offset = offsetof(struct v10_gfx_meta_data, de_payload); 6054 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; 6055 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; 6056 6057 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + 6058 AMDGPU_CSA_SIZE - adev->gds.gds_size, 6059 PAGE_SIZE); 6060 } 6061 6062 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 6063 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 6064 6065 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 6066 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 6067 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 6068 WRITE_DATA_DST_SEL(8) | 6069 WR_CONFIRM) | 6070 WRITE_DATA_CACHE_POLICY(0)); 6071 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); 6072 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); 6073 6074 if (resume) 6075 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, 6076 sizeof(de_payload) >> 2); 6077 else 6078 amdgpu_ring_write_multiple(ring, (void *)&de_payload, 6079 sizeof(de_payload) >> 2); 6080 } 6081 6082 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, 6083 bool secure) 6084 { 6085 uint32_t v = secure ? FRAME_TMZ : 0; 6086 6087 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 6088 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 
0 : 1)); 6089 } 6090 6091 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 6092 uint32_t reg_val_offs) 6093 { 6094 struct amdgpu_device *adev = ring->adev; 6095 6096 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 6097 amdgpu_ring_write(ring, 0 | /* src: register*/ 6098 (5 << 8) | /* dst: memory */ 6099 (1 << 20)); /* write confirm */ 6100 amdgpu_ring_write(ring, reg); 6101 amdgpu_ring_write(ring, 0); 6102 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 6103 reg_val_offs * 4)); 6104 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 6105 reg_val_offs * 4)); 6106 } 6107 6108 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 6109 uint32_t val) 6110 { 6111 uint32_t cmd = 0; 6112 6113 switch (ring->funcs->type) { 6114 case AMDGPU_RING_TYPE_GFX: 6115 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 6116 break; 6117 case AMDGPU_RING_TYPE_KIQ: 6118 cmd = (1 << 16); /* no inc addr */ 6119 break; 6120 default: 6121 cmd = WR_CONFIRM; 6122 break; 6123 } 6124 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6125 amdgpu_ring_write(ring, cmd); 6126 amdgpu_ring_write(ring, reg); 6127 amdgpu_ring_write(ring, 0); 6128 amdgpu_ring_write(ring, val); 6129 } 6130 6131 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 6132 uint32_t val, uint32_t mask) 6133 { 6134 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 6135 } 6136 6137 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 6138 uint32_t reg0, uint32_t reg1, 6139 uint32_t ref, uint32_t mask) 6140 { 6141 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 6142 6143 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 6144 ref, mask, 0x20); 6145 } 6146 6147 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, 6148 unsigned vmid) 6149 { 6150 struct amdgpu_device *adev = ring->adev; 6151 uint32_t value = 0; 6152 6153 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 6154 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 6155 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 6156 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 6157 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 6158 WREG32_SOC15(GC, 0, regSQ_CMD, value); 6159 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 6160 } 6161 6162 static void 6163 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 6164 uint32_t me, uint32_t pipe, 6165 enum amdgpu_interrupt_state state) 6166 { 6167 uint32_t cp_int_cntl, cp_int_cntl_reg; 6168 6169 if (!me) { 6170 switch (pipe) { 6171 case 0: 6172 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 6173 break; 6174 case 1: 6175 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 6176 break; 6177 default: 6178 DRM_DEBUG("invalid pipe %d\n", pipe); 6179 return; 6180 } 6181 } else { 6182 DRM_DEBUG("invalid me %d\n", me); 6183 return; 6184 } 6185 6186 switch (state) { 6187 case AMDGPU_IRQ_STATE_DISABLE: 6188 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6189 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6190 TIME_STAMP_INT_ENABLE, 0); 6191 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6192 GENERIC0_INT_ENABLE, 0); 6193 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6194 break; 6195 case AMDGPU_IRQ_STATE_ENABLE: 6196 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6197 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6198 TIME_STAMP_INT_ENABLE, 1); 6199 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6200 
GENERIC0_INT_ENABLE, 1); 6201 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6202 break; 6203 default: 6204 break; 6205 } 6206 } 6207 6208 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 6209 int me, int pipe, 6210 enum amdgpu_interrupt_state state) 6211 { 6212 u32 mec_int_cntl, mec_int_cntl_reg; 6213 6214 /* 6215 * amdgpu controls only the first MEC. That's why this function only 6216 * handles the setting of interrupts for this specific MEC. All other 6217 * pipes' interrupts are set by amdkfd. 6218 */ 6219 6220 if (me == 1) { 6221 switch (pipe) { 6222 case 0: 6223 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6224 break; 6225 case 1: 6226 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 6227 break; 6228 case 2: 6229 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 6230 break; 6231 case 3: 6232 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 6233 break; 6234 default: 6235 DRM_DEBUG("invalid pipe %d\n", pipe); 6236 return; 6237 } 6238 } else { 6239 DRM_DEBUG("invalid me %d\n", me); 6240 return; 6241 } 6242 6243 switch (state) { 6244 case AMDGPU_IRQ_STATE_DISABLE: 6245 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6246 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6247 TIME_STAMP_INT_ENABLE, 0); 6248 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6249 GENERIC0_INT_ENABLE, 0); 6250 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6251 break; 6252 case AMDGPU_IRQ_STATE_ENABLE: 6253 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6254 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6255 TIME_STAMP_INT_ENABLE, 1); 6256 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6257 GENERIC0_INT_ENABLE, 1); 6258 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6259 break; 6260 default: 6261 break; 6262 } 6263 } 6264 6265 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, 6266 struct amdgpu_irq_src *src, 6267 unsigned type, 6268 enum amdgpu_interrupt_state state) 6269 { 6270 switch (type) { 6271 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 6272 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 6273 break; 6274 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 6275 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 6276 break; 6277 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 6278 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 6279 break; 6280 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 6281 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 6282 break; 6283 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 6284 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 6285 break; 6286 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 6287 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 6288 break; 6289 default: 6290 break; 6291 } 6292 return 0; 6293 } 6294 6295 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, 6296 struct amdgpu_irq_src *source, 6297 struct amdgpu_iv_entry *entry) 6298 { 6299 int i; 6300 u8 me_id, pipe_id, queue_id; 6301 struct amdgpu_ring *ring; 6302 uint32_t mes_queue_id = entry->src_data[0]; 6303 6304 DRM_DEBUG("IH: CP EOP\n"); 6305 6306 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 6307 struct amdgpu_mes_queue *queue; 6308 6309 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 6310 6311 spin_lock(&adev->mes.queue_id_lock); 6312 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 6313 if 
(queue) { 6314 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 6315 amdgpu_fence_process(queue->ring); 6316 } 6317 spin_unlock(&adev->mes.queue_id_lock); 6318 } else { 6319 me_id = (entry->ring_id & 0x0c) >> 2; 6320 pipe_id = (entry->ring_id & 0x03) >> 0; 6321 queue_id = (entry->ring_id & 0x70) >> 4; 6322 6323 switch (me_id) { 6324 case 0: 6325 if (pipe_id == 0) 6326 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 6327 else 6328 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 6329 break; 6330 case 1: 6331 case 2: 6332 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6333 ring = &adev->gfx.compute_ring[i]; 6334 /* Per-queue interrupt is supported for MEC starting from VI. 6335 * The interrupt can only be enabled/disabled per pipe instead 6336 * of per queue. 6337 */ 6338 if ((ring->me == me_id) && 6339 (ring->pipe == pipe_id) && 6340 (ring->queue == queue_id)) 6341 amdgpu_fence_process(ring); 6342 } 6343 break; 6344 } 6345 } 6346 6347 return 0; 6348 } 6349 6350 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 6351 struct amdgpu_irq_src *source, 6352 unsigned int type, 6353 enum amdgpu_interrupt_state state) 6354 { 6355 u32 cp_int_cntl_reg, cp_int_cntl; 6356 int i, j; 6357 6358 switch (state) { 6359 case AMDGPU_IRQ_STATE_DISABLE: 6360 case AMDGPU_IRQ_STATE_ENABLE: 6361 for (i = 0; i < adev->gfx.me.num_me; i++) { 6362 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6363 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6364 6365 if (cp_int_cntl_reg) { 6366 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6367 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6368 PRIV_REG_INT_ENABLE, 6369 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6370 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6371 } 6372 } 6373 } 6374 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6375 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6376 /* MECs start at 1 */ 6377 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6378 6379 if (cp_int_cntl_reg) { 6380 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6381 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6382 PRIV_REG_INT_ENABLE, 6383 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6384 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6385 } 6386 } 6387 } 6388 break; 6389 default: 6390 break; 6391 } 6392 6393 return 0; 6394 } 6395 6396 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev, 6397 struct amdgpu_irq_src *source, 6398 unsigned type, 6399 enum amdgpu_interrupt_state state) 6400 { 6401 u32 cp_int_cntl_reg, cp_int_cntl; 6402 int i, j; 6403 6404 switch (state) { 6405 case AMDGPU_IRQ_STATE_DISABLE: 6406 case AMDGPU_IRQ_STATE_ENABLE: 6407 for (i = 0; i < adev->gfx.me.num_me; i++) { 6408 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6409 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6410 6411 if (cp_int_cntl_reg) { 6412 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6413 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6414 OPCODE_ERROR_INT_ENABLE, 6415 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				/* MECs start at 1 */
				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
								    OPCODE_ERROR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    PRIV_INSTR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal opcode in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
		return
adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry); 6541 6542 return 0; 6543 } 6544 6545 #if 0 6546 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 6547 struct amdgpu_irq_src *src, 6548 unsigned int type, 6549 enum amdgpu_interrupt_state state) 6550 { 6551 uint32_t tmp, target; 6552 struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); 6553 6554 target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6555 target += ring->pipe; 6556 6557 switch (type) { 6558 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 6559 if (state == AMDGPU_IRQ_STATE_DISABLE) { 6560 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6561 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6562 GENERIC2_INT_ENABLE, 0); 6563 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6564 6565 tmp = RREG32_SOC15_IP(GC, target); 6566 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6567 GENERIC2_INT_ENABLE, 0); 6568 WREG32_SOC15_IP(GC, target, tmp); 6569 } else { 6570 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6571 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6572 GENERIC2_INT_ENABLE, 1); 6573 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6574 6575 tmp = RREG32_SOC15_IP(GC, target); 6576 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6577 GENERIC2_INT_ENABLE, 1); 6578 WREG32_SOC15_IP(GC, target, tmp); 6579 } 6580 break; 6581 default: 6582 BUG(); /* kiq only support GENERIC2_INT now */ 6583 break; 6584 } 6585 return 0; 6586 } 6587 #endif 6588 6589 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) 6590 { 6591 const unsigned int gcr_cntl = 6592 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 6593 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 6594 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 6595 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 6596 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 6597 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 6598 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 6599 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 6600 6601 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 6602 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 6603 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 6604 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 6605 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 6606 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 6607 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 6608 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 6609 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 6610 } 6611 6612 static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) 6613 { 6614 struct amdgpu_device *adev = ring->adev; 6615 int r; 6616 6617 if (amdgpu_sriov_vf(adev)) 6618 return -EINVAL; 6619 6620 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); 6621 if (r) 6622 return r; 6623 6624 r = gfx_v11_0_kgq_init_queue(ring, true); 6625 if (r) { 6626 dev_err(adev->dev, "failed to init kgq\n"); 6627 return r; 6628 } 6629 6630 r = amdgpu_mes_map_legacy_queue(adev, ring); 6631 if (r) { 6632 dev_err(adev->dev, "failed to remap kgq\n"); 6633 return r; 6634 } 6635 6636 return amdgpu_ring_test_ring(ring); 6637 } 6638 6639 static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) 6640 { 6641 struct amdgpu_device *adev = ring->adev; 6642 int r = 0; 6643 6644 if (amdgpu_sriov_vf(adev)) 6645 return -EINVAL; 6646 6647 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); 6648 if (r) { 6649 dev_err(adev->dev, "reset via MMIO failed %d\n", r); 6650 return r; 6651 } 6652 6653 r = gfx_v11_0_kcq_init_queue(ring, true); 6654 if (r) { 6655 
dev_err(adev->dev, "failed to init kcq\n");
		return r;
	}
	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kcq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	for (i = 0; i < reg_count; i++)
		drm_printf(p, "%-50s \t 0x%08x\n",
			   gc_reg_list_11_0[i].reg_name,
			   adev->gfx.ip_dump_core[i]);

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_cp_reg_list_11[reg].reg_name,
						   adev->gfx.ip_dump_compute_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}

	/* print gfx queue registers for all instances */
	if (!adev->gfx.ip_dump_gfx_queues)
		return;

	index = 0;
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.me.num_me,
		   adev->gfx.me.num_pipe_per_me,
		   adev->gfx.me.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_gfx_queue_reg_list_11[reg].reg_name,
						   adev->gfx.ip_dump_gfx_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}
}

static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	amdgpu_gfx_off_ctrl(adev, false);
	for (i = 0; i < reg_count; i++)
		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i]));
	amdgpu_gfx_off_ctrl(adev, true);

	/* dump compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				/* ME0 is for GFX so start from 1 for CP */
				soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
				for (reg = 0; reg < reg_count; reg++) {
					adev->gfx.ip_dump_compute_queues[index + reg] =
						RREG32(SOC15_REG_ENTRY_OFFSET(
							gc_cp_reg_list_11[reg]));
				}
				index += reg_count;
6763 } 6764 } 6765 } 6766 soc21_grbm_select(adev, 0, 0, 0, 0); 6767 mutex_unlock(&adev->srbm_mutex); 6768 amdgpu_gfx_off_ctrl(adev, true); 6769 6770 /* dump gfx queue registers for all instances */ 6771 if (!adev->gfx.ip_dump_gfx_queues) 6772 return; 6773 6774 index = 0; 6775 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 6776 amdgpu_gfx_off_ctrl(adev, false); 6777 mutex_lock(&adev->srbm_mutex); 6778 for (i = 0; i < adev->gfx.me.num_me; i++) { 6779 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6780 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 6781 soc21_grbm_select(adev, i, j, k, 0); 6782 6783 for (reg = 0; reg < reg_count; reg++) { 6784 adev->gfx.ip_dump_gfx_queues[index + reg] = 6785 RREG32(SOC15_REG_ENTRY_OFFSET( 6786 gc_gfx_queue_reg_list_11[reg])); 6787 } 6788 index += reg_count; 6789 } 6790 } 6791 } 6792 soc21_grbm_select(adev, 0, 0, 0, 0); 6793 mutex_unlock(&adev->srbm_mutex); 6794 amdgpu_gfx_off_ctrl(adev, true); 6795 } 6796 6797 static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 6798 { 6799 /* Emit the cleaner shader */ 6800 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 6801 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 6802 } 6803 6804 static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring) 6805 { 6806 amdgpu_gfx_profile_ring_begin_use(ring); 6807 6808 amdgpu_gfx_enforce_isolation_ring_begin_use(ring); 6809 } 6810 6811 static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring) 6812 { 6813 amdgpu_gfx_profile_ring_end_use(ring); 6814 6815 amdgpu_gfx_enforce_isolation_ring_end_use(ring); 6816 } 6817 6818 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { 6819 .name = "gfx_v11_0", 6820 .early_init = gfx_v11_0_early_init, 6821 .late_init = gfx_v11_0_late_init, 6822 .sw_init = gfx_v11_0_sw_init, 6823 .sw_fini = gfx_v11_0_sw_fini, 6824 .hw_init = gfx_v11_0_hw_init, 6825 .hw_fini = gfx_v11_0_hw_fini, 6826 .suspend = gfx_v11_0_suspend, 6827 .resume = gfx_v11_0_resume, 6828 .is_idle = gfx_v11_0_is_idle, 6829 .wait_for_idle = gfx_v11_0_wait_for_idle, 6830 .soft_reset = gfx_v11_0_soft_reset, 6831 .check_soft_reset = gfx_v11_0_check_soft_reset, 6832 .post_soft_reset = gfx_v11_0_post_soft_reset, 6833 .set_clockgating_state = gfx_v11_0_set_clockgating_state, 6834 .set_powergating_state = gfx_v11_0_set_powergating_state, 6835 .get_clockgating_state = gfx_v11_0_get_clockgating_state, 6836 .dump_ip_state = gfx_v11_ip_dump, 6837 .print_ip_state = gfx_v11_ip_print, 6838 }; 6839 6840 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 6841 .type = AMDGPU_RING_TYPE_GFX, 6842 .align_mask = 0xff, 6843 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6844 .support_64bit_ptrs = true, 6845 .secure_submission_supported = true, 6846 .get_rptr = gfx_v11_0_ring_get_rptr_gfx, 6847 .get_wptr = gfx_v11_0_ring_get_wptr_gfx, 6848 .set_wptr = gfx_v11_0_ring_set_wptr_gfx, 6849 .emit_frame_size = /* totally 247 maximum if 16 IBs */ 6850 5 + /* update_spm_vmid */ 6851 5 + /* COND_EXEC */ 6852 22 + /* SET_Q_PREEMPTION_MODE */ 6853 7 + /* PIPELINE_SYNC */ 6854 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6855 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6856 4 + /* VM_FLUSH */ 6857 8 + /* FENCE for VM_FLUSH */ 6858 20 + /* GDS switch */ 6859 5 + /* COND_EXEC */ 6860 7 + /* HDP_flush */ 6861 4 + /* VGT_flush */ 6862 31 + /* DE_META */ 6863 3 + /* CNTX_CTRL */ 6864 5 + /* HDP_INVL */ 6865 22 + /* SET_Q_PREEMPTION_MODE */ 6866 8 + 8 + /* FENCE x2 */ 6867 8 + /* gfx_v11_0_emit_mem_sync */ 6868 2, /* gfx_v11_0_ring_emit_cleaner_shader */ 
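	/* emit_ib_size below counts 4 DW: PACKET3 header, IB base lo/hi, control */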
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.post_soft_reset = gfx_v11_0_post_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
	.dump_ip_state = gfx_v11_ip_dump,
	.print_ip_state = gfx_v11_ip_print,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 247 maximum if 16 IBs */
		5 + /* update_spm_vmid */
		5 + /* COND_EXEC */
		22 + /* SET_Q_PREEMPTION_MODE */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		4 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		22 + /* SET_Q_PREEMPTION_MODE */
		8 + 8 + /* FENCE x2 */
		8 + /* gfx_v11_0_emit_mem_sync */
		2, /* gfx_v11_0_ring_emit_cleaner_shader */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = gfx_v11_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
	.reset = gfx_v11_0_reset_kgq,
	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
	.begin_use = gfx_v11_0_ring_begin_use,
	.end_use = gfx_v11_0_ring_end_use,
};
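
/*
 * The compute variant drops the gfx-only hooks (context control, gfx shadow,
 * COND_EXEC init and IB preemption) but shares the fence/flush, cleaner
 * shader and begin_use/end_use plumbing with the gfx ring above.
 */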
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		5 + /* update_spm_vmid */
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8 + /* gfx_v11_0_emit_mem_sync */
		2, /* gfx_v11_0_ring_emit_cleaner_shader */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = gfx_v11_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
	.reset = gfx_v11_0_reset_kcq,
	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
	.begin_use = gfx_v11_0_ring_begin_use,
	.end_use = gfx_v11_0_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};
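
/*
 * The KIQ ring only carries the driver's own register access and queue
 * map/unmap packets, so it needs no VM flush, preemption or cleaner-shader
 * hooks. A single KIQ instance is used here, wired to kiq[0] below.
 */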
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
	.set = gfx_v11_0_set_bad_op_fault_state,
	.process = gfx_v11_0_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}

static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}

static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
			    adev->gfx.config.max_sh_per_se *
			    adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}

static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}
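
/*
 * A WGP (work group processor) packs two CUs, so each active bit in the WGP
 * bitmap expands to a pair of adjacent CU bits: WGP n maps to CUs 2n and
 * 2n + 1, i.e. the (3 << (2 * n)) pattern used below. For example, active
 * WGP bits 0b0101 expand to active CU bits 0b00110011.
 */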
static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			bitmap = i * adev->gfx.config.max_sh_per_se + j;
			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
				continue;
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/*
			 * GFX11 can support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So the last two columns of the bitmap are used to
			 * store the CU mask for SEs 4 to 7; the layout is:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};