1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/delay.h> 24 #include <linux/kernel.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include "amdgpu.h" 29 #include "amdgpu_gfx.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_smu.h" 32 #include "imu_v11_0.h" 33 #include "soc21.h" 34 #include "nvd.h" 35 36 #include "gc/gc_11_0_0_offset.h" 37 #include "gc/gc_11_0_0_sh_mask.h" 38 #include "smuio/smuio_13_0_6_offset.h" 39 #include "smuio/smuio_13_0_6_sh_mask.h" 40 #include "navi10_enum.h" 41 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 42 43 #include "soc15.h" 44 #include "clearstate_gfx11.h" 45 #include "v11_structs.h" 46 #include "gfx_v11_0.h" 47 #include "gfx_v11_0_cleaner_shader.h" 48 #include "gfx_v11_0_3.h" 49 #include "nbio_v4_3.h" 50 #include "mes_v11_0.h" 51 #include "mes_userqueue.h" 52 #include "amdgpu_userq_fence.h" 53 54 #define GFX11_NUM_GFX_RINGS 1 55 #define GFX11_MEC_HPD_SIZE 2048 56 57 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 58 #define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388 59 60 #define regCGTT_WD_CLK_CTRL 0x5086 61 #define regCGTT_WD_CLK_CTRL_BASE_IDX 1 62 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1 0x4e7e 63 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX 1 64 #define regPC_CONFIG_CNTL_1 0x194d 65 #define regPC_CONFIG_CNTL_1_BASE_IDX 1 66 67 #define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100 68 #define regCP_GFX_HQD_VMID_DEFAULT 0x00000000 69 #define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000 70 #define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01 71 #define regCP_GFX_HQD_CNTL_DEFAULT 0x00a00000 72 #define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000 73 #define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000 74 75 #define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006 76 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 77 #define regCP_MQD_CONTROL_DEFAULT 0x00000100 78 #define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509 79 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 80 #define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000 81 #define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501 82 #define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000 83 84 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); 85 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); 86 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); 87 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); 88 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin"); 89 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); 90 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); 91 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); 92 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin"); 93 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin"); 94 MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin"); 95 MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin"); 96 MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin"); 97 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin"); 98 MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin"); 99 MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin"); 100 MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin"); 101 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin"); 102 MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin"); 103 MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin"); 104 MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin"); 105 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin"); 106 MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin"); 107 MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin"); 108 MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin"); 109 MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin"); 110 MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin"); 111 MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin"); 112 MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin"); 113 MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin"); 114 MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin"); 115 MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin"); 116 MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin"); 117 MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin"); 118 MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin"); 119 MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin"); 120 MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin"); 121 MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin"); 122 MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin"); 123 124 static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = { 125 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), 126 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), 127 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3), 128 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), 129 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), 130 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3), 131 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), 132 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), 133 SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), 134 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), 135 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), 136 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2), 137 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2), 138 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), 139 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), 140 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0), 141 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE), 142 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 143 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR), 144 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE), 145 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR), 146 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR), 147 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE), 148 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR), 149 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR), 150 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 151 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ), 152 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 153 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 154 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 155 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO), 156 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI), 157 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ), 158 SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), 159 SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), 160 SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), 161 SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT), 162 SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT), 163 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS), 164 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2), 165 SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS), 166 SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS), 167 SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), 168 SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES), 169 SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS), 170 SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), 171 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL), 172 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS), 173 SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), 174 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), 175 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL), 176 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR), 177 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR), 178 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR), 179 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR), 180 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR), 181 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), 182 /* cp header registers */ 183 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 184 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 185 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 186 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 187 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 188 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 189 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 190 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 191 /* SE status registers */ 192 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), 193 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), 194 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2), 195 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3), 196 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4), 197 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5) 198 }; 199 200 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = { 201 /* compute registers */ 202 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID), 203 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE), 204 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY), 205 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY), 206 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM), 207 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE), 208 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI), 209 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR), 210 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), 211 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), 212 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), 213 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL), 214 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR), 215 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI), 216 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR), 217 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL), 218 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST), 219 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR), 220 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI), 221 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL), 222 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR), 223 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR), 224 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS), 225 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO), 226 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI), 227 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL), 228 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET), 229 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE), 230 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET), 231 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE), 232 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE), 233 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR), 234 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM), 235 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), 236 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), 237 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET), 238 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT), 239 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET), 240 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS), 241 /* cp header registers */ 242 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 243 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 244 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 245 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 246 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 247 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 248 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 249 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 250 }; 251 252 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = { 253 /* gfx queue registers */ 254 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE), 255 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID), 256 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY), 257 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM), 258 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE), 259 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI), 260 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET), 261 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL), 262 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR), 263 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR), 264 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI), 265 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST), 266 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED), 267 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL), 268 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0), 269 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0), 270 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR), 271 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI), 272 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO), 273 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI), 274 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 275 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 276 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 277 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 278 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 279 /* cp header registers */ 280 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 281 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 282 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 283 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 284 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 285 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 286 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 287 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 288 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 289 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 290 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 291 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 292 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 293 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 294 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 295 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 296 }; 297 298 static const struct soc15_reg_golden golden_settings_gc_11_0[] = { 299 SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000) 300 }; 301 302 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = 303 { 304 SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010), 305 SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010), 306 SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), 307 SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988), 308 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007), 309 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008), 310 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100), 311 SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), 312 SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) 313 }; 314 315 #define DEFAULT_SH_MEM_CONFIG \ 316 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 317 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ 318 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) 319 320 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev); 321 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev); 322 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev); 323 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev); 324 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev); 325 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev); 326 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev); 327 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 328 struct amdgpu_cu_info *cu_info); 329 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); 330 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 331 u32 sh_num, u32 instance, int xcc_id); 332 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 333 334 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); 335 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 336 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 337 uint32_t val); 338 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); 339 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 340 uint16_t pasid, uint32_t flush_type, 341 bool all_hub, uint8_t dst_sel); 342 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id); 343 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); 344 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 345 bool enable); 346 347 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) 348 { 349 struct amdgpu_device *adev = kiq_ring->adev; 350 u64 shader_mc_addr; 351 352 /* Cleaner shader MC address */ 353 shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; 354 355 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 356 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 357 PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ 358 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 359 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 360 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 361 amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ 362 amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ 363 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 364 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 365 } 366 367 static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring, 368 struct amdgpu_ring *ring) 369 { 370 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 371 uint64_t wptr_addr = ring->wptr_gpu_addr; 372 uint32_t me = 0, eng_sel = 0; 373 374 switch (ring->funcs->type) { 375 case AMDGPU_RING_TYPE_COMPUTE: 376 me = 1; 377 eng_sel = 0; 378 break; 379 case AMDGPU_RING_TYPE_GFX: 380 me = 0; 381 eng_sel = 4; 382 break; 383 case AMDGPU_RING_TYPE_MES: 384 me = 2; 385 eng_sel = 5; 386 break; 387 default: 388 WARN_ON(1); 389 } 390 391 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 392 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 393 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 394 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 395 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 396 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 397 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 398 PACKET3_MAP_QUEUES_ME((me)) | 399 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 400 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 401 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 402 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 403 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 404 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 405 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 406 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 407 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 408 } 409 410 static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 411 struct amdgpu_ring *ring, 412 enum amdgpu_unmap_queues_action action, 413 u64 gpu_addr, u64 seq) 414 { 415 struct amdgpu_device *adev = kiq_ring->adev; 416 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 417 418 if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { 419 amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); 420 return; 421 } 422 423 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 424 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 425 PACKET3_UNMAP_QUEUES_ACTION(action) | 426 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 427 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | 428 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 429 amdgpu_ring_write(kiq_ring, 430 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 431 432 if (action == PREEMPT_QUEUES_NO_UNMAP) { 433 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); 434 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); 435 amdgpu_ring_write(kiq_ring, seq); 436 } else { 437 amdgpu_ring_write(kiq_ring, 0); 438 amdgpu_ring_write(kiq_ring, 0); 439 amdgpu_ring_write(kiq_ring, 0); 440 } 441 } 442 443 static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring, 444 struct amdgpu_ring *ring, 445 u64 addr, 446 u64 seq) 447 { 448 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 449 450 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); 451 amdgpu_ring_write(kiq_ring, 452 PACKET3_QUERY_STATUS_CONTEXT_ID(0) | 453 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | 454 PACKET3_QUERY_STATUS_COMMAND(2)); 455 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 456 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | 457 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); 458 amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); 459 amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); 460 amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); 461 amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); 462 } 463 464 static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, 465 uint16_t pasid, uint32_t flush_type, 466 bool all_hub) 467 { 468 gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1); 469 } 470 471 static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = { 472 .kiq_set_resources = gfx11_kiq_set_resources, 473 .kiq_map_queues = gfx11_kiq_map_queues, 474 .kiq_unmap_queues = gfx11_kiq_unmap_queues, 475 .kiq_query_status = gfx11_kiq_query_status, 476 .kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs, 477 .set_resources_size = 8, 478 .map_queues_size = 7, 479 .unmap_queues_size = 6, 480 .query_status_size = 7, 481 .invalidate_tlbs_size = 2, 482 }; 483 484 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) 485 { 486 adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs; 487 } 488 489 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) 490 { 491 if (amdgpu_sriov_vf(adev)) 492 return; 493 494 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 495 case IP_VERSION(11, 0, 1): 496 case IP_VERSION(11, 0, 4): 497 soc15_program_register_sequence(adev, 498 golden_settings_gc_11_0_1, 499 (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); 500 break; 501 default: 502 break; 503 } 504 soc15_program_register_sequence(adev, 505 golden_settings_gc_11_0, 506 (const u32)ARRAY_SIZE(golden_settings_gc_11_0)); 507 508 } 509 510 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, 511 bool wc, uint32_t reg, uint32_t val) 512 { 513 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 514 amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | 515 WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0)); 516 amdgpu_ring_write(ring, reg); 517 amdgpu_ring_write(ring, 0); 518 amdgpu_ring_write(ring, val); 519 } 520 521 static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 522 int mem_space, int opt, uint32_t addr0, 523 uint32_t addr1, uint32_t ref, uint32_t mask, 524 uint32_t inv) 525 { 526 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 527 amdgpu_ring_write(ring, 528 /* memory (1) or register (0) */ 529 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 530 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 531 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 532 WAIT_REG_MEM_ENGINE(eng_sel))); 533 534 if (mem_space) 535 BUG_ON(addr0 & 0x3); /* Dword align */ 536 amdgpu_ring_write(ring, addr0); 537 amdgpu_ring_write(ring, addr1); 538 amdgpu_ring_write(ring, ref); 539 amdgpu_ring_write(ring, mask); 540 amdgpu_ring_write(ring, inv); /* poll interval */ 541 } 542 543 static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) 544 { 545 /* Header itself is a NOP packet */ 546 if (num_nop == 1) { 547 amdgpu_ring_write(ring, ring->funcs->nop); 548 return; 549 } 550 551 /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ 552 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); 553 554 /* Header is at index 0, followed by num_nops - 1 NOP packet's */ 555 amdgpu_ring_insert_nop(ring, num_nop - 1); 556 } 557 558 static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) 559 { 560 struct amdgpu_device *adev = ring->adev; 561 uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 562 uint32_t tmp = 0; 563 unsigned i; 564 int r; 565 566 WREG32(scratch, 0xCAFEDEAD); 567 r = amdgpu_ring_alloc(ring, 5); 568 if (r) { 569 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 570 ring->idx, r); 571 return r; 572 } 573 574 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 575 gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); 576 } else { 577 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 578 amdgpu_ring_write(ring, scratch - 579 PACKET3_SET_UCONFIG_REG_START); 580 amdgpu_ring_write(ring, 0xDEADBEEF); 581 } 582 amdgpu_ring_commit(ring); 583 584 for (i = 0; i < adev->usec_timeout; i++) { 585 tmp = RREG32(scratch); 586 if (tmp == 0xDEADBEEF) 587 break; 588 if (amdgpu_emu_mode == 1) 589 msleep(1); 590 else 591 udelay(1); 592 } 593 594 if (i >= adev->usec_timeout) 595 r = -ETIMEDOUT; 596 return r; 597 } 598 599 static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 600 { 601 struct amdgpu_device *adev = ring->adev; 602 struct amdgpu_ib ib; 603 struct dma_fence *f = NULL; 604 unsigned index; 605 uint64_t gpu_addr; 606 uint32_t *cpu_ptr; 607 long r; 608 609 /* MES KIQ fw hasn't indirect buffer support for now */ 610 if (adev->enable_mes_kiq && 611 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 612 return 0; 613 614 memset(&ib, 0, sizeof(ib)); 615 616 r = amdgpu_device_wb_get(adev, &index); 617 if (r) 618 return r; 619 620 gpu_addr = adev->wb.gpu_addr + (index * 4); 621 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 622 cpu_ptr = &adev->wb.wb[index]; 623 624 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); 625 if (r) { 626 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 627 goto err1; 628 } 629 630 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 631 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 632 ib.ptr[2] = lower_32_bits(gpu_addr); 633 ib.ptr[3] = upper_32_bits(gpu_addr); 634 ib.ptr[4] = 0xDEADBEEF; 635 ib.length_dw = 5; 636 637 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 638 if (r) 639 goto err2; 640 641 r = dma_fence_wait_timeout(f, false, timeout); 642 if (r == 0) { 643 r = -ETIMEDOUT; 644 goto err2; 645 } else if (r < 0) { 646 goto err2; 647 } 648 649 if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) 650 r = 0; 651 else 652 r = -EINVAL; 653 err2: 654 amdgpu_ib_free(&ib, NULL); 655 dma_fence_put(f); 656 err1: 657 amdgpu_device_wb_free(adev, index); 658 return r; 659 } 660 661 static void gfx_v11_0_free_microcode(struct amdgpu_device *adev) 662 { 663 amdgpu_ucode_release(&adev->gfx.pfp_fw); 664 amdgpu_ucode_release(&adev->gfx.me_fw); 665 amdgpu_ucode_release(&adev->gfx.rlc_fw); 666 amdgpu_ucode_release(&adev->gfx.mec_fw); 667 668 kfree(adev->gfx.rlc.register_list_format); 669 } 670 671 static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix) 672 { 673 const struct psp_firmware_header_v1_0 *toc_hdr; 674 int err = 0; 675 676 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, 677 AMDGPU_UCODE_REQUIRED, 678 "amdgpu/%s_toc.bin", ucode_prefix); 679 if (err) 680 goto out; 681 682 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 683 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 684 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 685 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 686 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 687 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 688 return 0; 689 out: 690 amdgpu_ucode_release(&adev->psp.toc_fw); 691 return err; 692 } 693 694 static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) 695 { 696 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 697 case IP_VERSION(11, 0, 0): 698 case IP_VERSION(11, 0, 2): 699 case IP_VERSION(11, 0, 3): 700 if ((adev->gfx.me_fw_version >= 1505) && 701 (adev->gfx.pfp_fw_version >= 1600) && 702 (adev->gfx.mec_fw_version >= 512)) { 703 if (amdgpu_sriov_vf(adev)) 704 adev->gfx.cp_gfx_shadow = true; 705 else 706 adev->gfx.cp_gfx_shadow = false; 707 } 708 break; 709 default: 710 adev->gfx.cp_gfx_shadow = false; 711 break; 712 } 713 } 714 715 static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) 716 { 717 char ucode_prefix[25]; 718 int err; 719 const struct rlc_firmware_header_v2_0 *rlc_hdr; 720 uint16_t version_major; 721 uint16_t version_minor; 722 723 DRM_DEBUG("\n"); 724 725 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 726 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, 727 AMDGPU_UCODE_REQUIRED, 728 "amdgpu/%s_pfp.bin", ucode_prefix); 729 if (err) 730 goto out; 731 /* check pfp fw hdr version to decide if enable rs64 for gfx11.*/ 732 adev->gfx.rs64_enable = amdgpu_ucode_hdr_version( 733 (union amdgpu_firmware_header *) 734 adev->gfx.pfp_fw->data, 2, 0); 735 if (adev->gfx.rs64_enable) { 736 dev_info(adev->dev, "CP RS64 enable\n"); 737 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP); 738 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK); 739 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK); 740 } else { 741 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP); 742 } 743 744 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, 745 AMDGPU_UCODE_REQUIRED, 746 "amdgpu/%s_me.bin", ucode_prefix); 747 if (err) 748 goto out; 749 if (adev->gfx.rs64_enable) { 750 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME); 751 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); 752 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK); 753 } else { 754 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME); 755 } 756 757 if (!amdgpu_sriov_vf(adev)) { 758 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) && 759 adev->pdev->revision == 0xCE) 760 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 761 AMDGPU_UCODE_REQUIRED, 762 "amdgpu/gc_11_0_0_rlc_1.bin"); 763 else if (amdgpu_is_kicker_fw(adev)) 764 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 765 AMDGPU_UCODE_REQUIRED, 766 "amdgpu/%s_rlc_kicker.bin", ucode_prefix); 767 else 768 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 769 AMDGPU_UCODE_REQUIRED, 770 "amdgpu/%s_rlc.bin", ucode_prefix); 771 if (err) 772 goto out; 773 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 774 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 775 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 776 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); 777 if (err) 778 goto out; 779 } 780 781 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 782 AMDGPU_UCODE_REQUIRED, 783 "amdgpu/%s_mec.bin", ucode_prefix); 784 if (err) 785 goto out; 786 if (adev->gfx.rs64_enable) { 787 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC); 788 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK); 789 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK); 790 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK); 791 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK); 792 } else { 793 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); 794 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); 795 } 796 797 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 798 err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix); 799 800 /* only one MEC for gfx 11.0.0. */ 801 adev->gfx.mec2_fw = NULL; 802 803 gfx_v11_0_check_fw_cp_gfx_shadow(adev); 804 805 if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) { 806 err = adev->gfx.imu.funcs->init_microcode(adev); 807 if (err) 808 DRM_ERROR("Failed to init imu firmware!\n"); 809 return err; 810 } 811 812 out: 813 if (err) { 814 amdgpu_ucode_release(&adev->gfx.pfp_fw); 815 amdgpu_ucode_release(&adev->gfx.me_fw); 816 amdgpu_ucode_release(&adev->gfx.rlc_fw); 817 amdgpu_ucode_release(&adev->gfx.mec_fw); 818 } 819 820 return err; 821 } 822 823 static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) 824 { 825 u32 count = 0; 826 const struct cs_section_def *sect = NULL; 827 const struct cs_extent_def *ext = NULL; 828 829 /* begin clear state */ 830 count += 2; 831 /* context control state */ 832 count += 3; 833 834 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 835 for (ext = sect->section; ext->extent != NULL; ++ext) { 836 if (sect->id == SECT_CONTEXT) 837 count += 2 + ext->reg_count; 838 else 839 return 0; 840 } 841 } 842 843 /* set PA_SC_TILE_STEERING_OVERRIDE */ 844 count += 3; 845 /* end clear state */ 846 count += 2; 847 /* clear state */ 848 count += 2; 849 850 return count; 851 } 852 853 static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 854 { 855 u32 count = 0; 856 int ctx_reg_offset; 857 858 if (adev->gfx.rlc.cs_data == NULL) 859 return; 860 if (buffer == NULL) 861 return; 862 863 count = amdgpu_gfx_csb_preamble_start(buffer); 864 count = amdgpu_gfx_csb_data_parser(adev, buffer, count); 865 866 ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 867 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 868 buffer[count++] = cpu_to_le32(ctx_reg_offset); 869 buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override); 870 871 amdgpu_gfx_csb_preamble_end(buffer, count); 872 } 873 874 static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev) 875 { 876 /* clear state block */ 877 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 878 &adev->gfx.rlc.clear_state_gpu_addr, 879 (void **)&adev->gfx.rlc.cs_ptr); 880 881 /* jump table block */ 882 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 883 &adev->gfx.rlc.cp_table_gpu_addr, 884 (void **)&adev->gfx.rlc.cp_table_ptr); 885 } 886 887 static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 888 { 889 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 890 891 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; 892 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 893 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); 894 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); 895 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3); 896 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL); 897 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX); 898 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0); 899 adev->gfx.rlc.rlcg_reg_access_supported = true; 900 } 901 902 static int gfx_v11_0_rlc_init(struct amdgpu_device *adev) 903 { 904 const struct cs_section_def *cs_data; 905 int r; 906 907 adev->gfx.rlc.cs_data = gfx11_cs_data; 908 909 cs_data = adev->gfx.rlc.cs_data; 910 911 if (cs_data) { 912 /* init clear state block */ 913 r = amdgpu_gfx_rlc_init_csb(adev); 914 if (r) 915 return r; 916 } 917 918 /* init spm vmid with 0xf */ 919 if (adev->gfx.rlc.funcs->update_spm_vmid) 920 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); 921 922 return 0; 923 } 924 925 static void gfx_v11_0_mec_fini(struct amdgpu_device *adev) 926 { 927 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 928 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 929 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); 930 } 931 932 static void gfx_v11_0_me_init(struct amdgpu_device *adev) 933 { 934 bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); 935 936 amdgpu_gfx_graphics_queue_acquire(adev); 937 } 938 939 static int gfx_v11_0_mec_init(struct amdgpu_device *adev) 940 { 941 int r; 942 u32 *hpd; 943 size_t mec_hpd_size; 944 945 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 946 947 /* take ownership of the relevant compute queues */ 948 amdgpu_gfx_compute_queue_acquire(adev); 949 mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE; 950 951 if (mec_hpd_size) { 952 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 953 AMDGPU_GEM_DOMAIN_GTT, 954 &adev->gfx.mec.hpd_eop_obj, 955 &adev->gfx.mec.hpd_eop_gpu_addr, 956 (void **)&hpd); 957 if (r) { 958 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 959 gfx_v11_0_mec_fini(adev); 960 return r; 961 } 962 963 memset(hpd, 0, mec_hpd_size); 964 965 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 966 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 967 } 968 969 return 0; 970 } 971 972 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address) 973 { 974 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 975 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 976 (address << SQ_IND_INDEX__INDEX__SHIFT)); 977 return RREG32_SOC15(GC, 0, regSQ_IND_DATA); 978 } 979 980 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, 981 uint32_t thread, uint32_t regno, 982 uint32_t num, uint32_t *out) 983 { 984 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 985 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 986 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 987 (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | 988 (SQ_IND_INDEX__AUTO_INCR_MASK)); 989 while (num--) 990 *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); 991 } 992 993 static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) 994 { 995 /* in gfx11 the SIMD_ID is specified as part of the INSTANCE 996 * field when performing a select_se_sh so it should be 997 * zero here */ 998 WARN_ON(simd != 0); 999 1000 /* type 3 wave data */ 1001 dst[(*no_fields)++] = 3; 1002 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS); 1003 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO); 1004 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI); 1005 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO); 1006 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI); 1007 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1); 1008 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2); 1009 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC); 1010 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC); 1011 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS); 1012 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS); 1013 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); 1014 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); 1015 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); 1016 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); 1017 } 1018 1019 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 1020 uint32_t wave, uint32_t start, 1021 uint32_t size, uint32_t *dst) 1022 { 1023 WARN_ON(simd != 0); 1024 1025 wave_read_regs( 1026 adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, 1027 dst); 1028 } 1029 1030 static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 1031 uint32_t wave, uint32_t thread, 1032 uint32_t start, uint32_t size, 1033 uint32_t *dst) 1034 { 1035 wave_read_regs( 1036 adev, wave, thread, 1037 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 1038 } 1039 1040 static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, 1041 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 1042 { 1043 soc21_grbm_select(adev, me, pipe, q, vm); 1044 } 1045 1046 /* all sizes are in bytes */ 1047 #define MQD_SHADOW_BASE_SIZE 73728 1048 #define MQD_SHADOW_BASE_ALIGNMENT 256 1049 #define MQD_FWWORKAREA_SIZE 484 1050 #define MQD_FWWORKAREA_ALIGNMENT 256 1051 1052 static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev, 1053 struct amdgpu_gfx_shadow_info *shadow_info) 1054 { 1055 shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE; 1056 shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT; 1057 shadow_info->csa_size = MQD_FWWORKAREA_SIZE; 1058 shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT; 1059 } 1060 1061 static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev, 1062 struct amdgpu_gfx_shadow_info *shadow_info, 1063 bool skip_check) 1064 { 1065 if (adev->gfx.cp_gfx_shadow || skip_check) { 1066 gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info); 1067 return 0; 1068 } else { 1069 memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info)); 1070 return -ENOTSUPP; 1071 } 1072 } 1073 1074 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { 1075 .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter, 1076 .select_se_sh = &gfx_v11_0_select_se_sh, 1077 .read_wave_data = &gfx_v11_0_read_wave_data, 1078 .read_wave_sgprs = &gfx_v11_0_read_wave_sgprs, 1079 .read_wave_vgprs = &gfx_v11_0_read_wave_vgprs, 1080 .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q, 1081 .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk, 1082 .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info, 1083 }; 1084 1085 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) 1086 { 1087 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1088 case IP_VERSION(11, 0, 0): 1089 case IP_VERSION(11, 0, 2): 1090 adev->gfx.config.max_hw_contexts = 8; 1091 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1092 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1093 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 1094 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1095 break; 1096 case IP_VERSION(11, 0, 3): 1097 adev->gfx.ras = &gfx_v11_0_3_ras; 1098 adev->gfx.config.max_hw_contexts = 8; 1099 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1100 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1101 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 1102 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1103 break; 1104 case IP_VERSION(11, 0, 1): 1105 case IP_VERSION(11, 0, 4): 1106 case IP_VERSION(11, 5, 0): 1107 case IP_VERSION(11, 5, 1): 1108 case IP_VERSION(11, 5, 2): 1109 case IP_VERSION(11, 5, 3): 1110 adev->gfx.config.max_hw_contexts = 8; 1111 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1112 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1113 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80; 1114 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300; 1115 break; 1116 default: 1117 BUG(); 1118 break; 1119 } 1120 1121 return 0; 1122 } 1123 1124 static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, 1125 int me, int pipe, int queue) 1126 { 1127 struct amdgpu_ring *ring; 1128 unsigned int irq_type; 1129 unsigned int hw_prio; 1130 1131 ring = &adev->gfx.gfx_ring[ring_id]; 1132 1133 ring->me = me; 1134 ring->pipe = pipe; 1135 ring->queue = queue; 1136 1137 ring->ring_obj = NULL; 1138 ring->use_doorbell = true; 1139 if (adev->gfx.disable_kq) { 1140 ring->no_scheduler = true; 1141 ring->no_user_submission = true; 1142 } 1143 1144 if (!ring_id) 1145 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; 1146 else 1147 ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; 1148 ring->vm_hub = AMDGPU_GFXHUB(0); 1149 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1150 1151 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; 1152 hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ? 1153 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 1154 return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1155 hw_prio, NULL); 1156 } 1157 1158 static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 1159 int mec, int pipe, int queue) 1160 { 1161 int r; 1162 unsigned irq_type; 1163 struct amdgpu_ring *ring; 1164 unsigned int hw_prio; 1165 1166 ring = &adev->gfx.compute_ring[ring_id]; 1167 1168 /* mec0 is me1 */ 1169 ring->me = mec + 1; 1170 ring->pipe = pipe; 1171 ring->queue = queue; 1172 1173 ring->ring_obj = NULL; 1174 ring->use_doorbell = true; 1175 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; 1176 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 1177 + (ring_id * GFX11_MEC_HPD_SIZE); 1178 ring->vm_hub = AMDGPU_GFXHUB(0); 1179 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1180 1181 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 1182 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 1183 + ring->pipe; 1184 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 1185 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 1186 /* type-2 packets are deprecated on MEC, use type-3 instead */ 1187 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1188 hw_prio, NULL); 1189 if (r) 1190 return r; 1191 1192 return 0; 1193 } 1194 1195 static struct { 1196 SOC21_FIRMWARE_ID id; 1197 unsigned int offset; 1198 unsigned int size; 1199 } rlc_autoload_info[SOC21_FIRMWARE_ID_MAX]; 1200 1201 static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) 1202 { 1203 RLC_TABLE_OF_CONTENT *ucode = rlc_toc; 1204 1205 while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) && 1206 (ucode->id < SOC21_FIRMWARE_ID_MAX)) { 1207 rlc_autoload_info[ucode->id].id = ucode->id; 1208 rlc_autoload_info[ucode->id].offset = ucode->offset * 4; 1209 rlc_autoload_info[ucode->id].size = ucode->size * 4; 1210 1211 ucode++; 1212 } 1213 } 1214 1215 static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev) 1216 { 1217 uint32_t total_size = 0; 1218 SOC21_FIRMWARE_ID id; 1219 1220 gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr); 1221 1222 for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++) 1223 total_size += rlc_autoload_info[id].size; 1224 1225 /* In case the offset in rlc toc ucode is aligned */ 1226 if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset) 1227 total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset + 1228 rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size; 1229 1230 return total_size; 1231 } 1232 1233 static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) 1234 { 1235 int r; 1236 uint32_t total_size; 1237 1238 total_size = gfx_v11_0_calc_toc_total_size(adev); 1239 1240 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 1241 AMDGPU_GEM_DOMAIN_VRAM | 1242 AMDGPU_GEM_DOMAIN_GTT, 1243 &adev->gfx.rlc.rlc_autoload_bo, 1244 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1245 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1246 1247 if (r) { 1248 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 1249 return r; 1250 } 1251 1252 return 0; 1253 } 1254 1255 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 1256 SOC21_FIRMWARE_ID id, 1257 const void *fw_data, 1258 uint32_t fw_size, 1259 uint32_t *fw_autoload_mask) 1260 { 1261 uint32_t toc_offset; 1262 uint32_t toc_fw_size; 1263 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 1264 1265 if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX) 1266 return; 1267 1268 toc_offset = rlc_autoload_info[id].offset; 1269 toc_fw_size = rlc_autoload_info[id].size; 1270 1271 if (fw_size == 0) 1272 fw_size = toc_fw_size; 1273 1274 if (fw_size > toc_fw_size) 1275 fw_size = toc_fw_size; 1276 1277 memcpy(ptr + toc_offset, fw_data, fw_size); 1278 1279 if (fw_size < toc_fw_size) 1280 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); 1281 1282 if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) 1283 *(uint64_t *)fw_autoload_mask |= 1ULL << id; 1284 } 1285 1286 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, 1287 uint32_t *fw_autoload_mask) 1288 { 1289 void *data; 1290 uint32_t size; 1291 uint64_t *toc_ptr; 1292 1293 *(uint64_t *)fw_autoload_mask |= 0x1; 1294 1295 DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask); 1296 1297 data = adev->psp.toc.start_addr; 1298 size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size; 1299 1300 toc_ptr = (uint64_t *)data + size / 8 - 1; 1301 *toc_ptr = *(uint64_t *)fw_autoload_mask; 1302 1303 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC, 1304 data, size, fw_autoload_mask); 1305 } 1306 1307 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev, 1308 uint32_t *fw_autoload_mask) 1309 { 1310 const __le32 *fw_data; 1311 uint32_t fw_size; 1312 const struct gfx_firmware_header_v1_0 *cp_hdr; 1313 const struct gfx_firmware_header_v2_0 *cpv2_hdr; 1314 const struct rlc_firmware_header_v2_0 *rlc_hdr; 1315 const struct rlc_firmware_header_v2_2 *rlcv22_hdr; 1316 uint16_t version_major, version_minor; 1317 1318 if (adev->gfx.rs64_enable) { 1319 /* pfp ucode */ 1320 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1321 adev->gfx.pfp_fw->data; 1322 /* instruction */ 1323 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1324 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1325 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1326 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP, 1327 fw_data, fw_size, fw_autoload_mask); 1328 /* data */ 1329 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1330 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1331 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1332 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK, 1333 fw_data, fw_size, fw_autoload_mask); 1334 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK, 1335 fw_data, fw_size, fw_autoload_mask); 1336 /* me ucode */ 1337 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1338 adev->gfx.me_fw->data; 1339 /* instruction */ 1340 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1341 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1342 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1343 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME, 1344 fw_data, fw_size, fw_autoload_mask); 1345 /* data */ 1346 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1347 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1348 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1349 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK, 1350 fw_data, fw_size, fw_autoload_mask); 1351 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK, 1352 fw_data, fw_size, fw_autoload_mask); 1353 /* mec ucode */ 1354 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1355 adev->gfx.mec_fw->data; 1356 /* instruction */ 1357 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1358 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1359 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1360 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC, 1361 fw_data, fw_size, fw_autoload_mask); 1362 /* data */ 1363 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1364 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1365 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1366 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK, 1367 fw_data, fw_size, fw_autoload_mask); 1368 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK, 1369 fw_data, fw_size, fw_autoload_mask); 1370 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK, 1371 fw_data, fw_size, fw_autoload_mask); 1372 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK, 1373 fw_data, fw_size, fw_autoload_mask); 1374 } else { 1375 /* pfp ucode */ 1376 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1377 adev->gfx.pfp_fw->data; 1378 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1379 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1380 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1381 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP, 1382 fw_data, fw_size, fw_autoload_mask); 1383 1384 /* me ucode */ 1385 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1386 adev->gfx.me_fw->data; 1387 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1388 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1389 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1390 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME, 1391 fw_data, fw_size, fw_autoload_mask); 1392 1393 /* mec ucode */ 1394 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1395 adev->gfx.mec_fw->data; 1396 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1397 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1398 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) - 1399 cp_hdr->jt_size * 4; 1400 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC, 1401 fw_data, fw_size, fw_autoload_mask); 1402 } 1403 1404 /* rlc ucode */ 1405 rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 1406 adev->gfx.rlc_fw->data; 1407 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1408 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1409 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1410 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE, 1411 fw_data, fw_size, fw_autoload_mask); 1412 1413 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 1414 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 1415 if (version_major == 2) { 1416 if (version_minor >= 2) { 1417 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1418 1419 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1420 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); 1421 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); 1422 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE, 1423 fw_data, fw_size, fw_autoload_mask); 1424 1425 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1426 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); 1427 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); 1428 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT, 1429 fw_data, fw_size, fw_autoload_mask); 1430 } 1431 } 1432 } 1433 1434 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev, 1435 uint32_t *fw_autoload_mask) 1436 { 1437 const __le32 *fw_data; 1438 uint32_t fw_size; 1439 const struct sdma_firmware_header_v2_0 *sdma_hdr; 1440 1441 sdma_hdr = (const struct sdma_firmware_header_v2_0 *) 1442 adev->sdma.instance[0].fw->data; 1443 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1444 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes)); 1445 fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes); 1446 1447 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1448 SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask); 1449 1450 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1451 le32_to_cpu(sdma_hdr->ctl_ucode_offset)); 1452 fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes); 1453 1454 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1455 SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask); 1456 } 1457 1458 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev, 1459 uint32_t *fw_autoload_mask) 1460 { 1461 const __le32 *fw_data; 1462 unsigned fw_size; 1463 const struct mes_firmware_header_v1_0 *mes_hdr; 1464 int pipe, ucode_id, data_id; 1465 1466 for (pipe = 0; pipe < 2; pipe++) { 1467 if (pipe==0) { 1468 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0; 1469 data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK; 1470 } else { 1471 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1; 1472 data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK; 1473 } 1474 1475 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1476 adev->mes.fw[pipe]->data; 1477 1478 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1479 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1480 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1481 1482 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1483 ucode_id, fw_data, fw_size, fw_autoload_mask); 1484 1485 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1486 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1487 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1488 1489 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1490 data_id, fw_data, fw_size, fw_autoload_mask); 1491 } 1492 } 1493 1494 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1495 { 1496 uint32_t rlc_g_offset, rlc_g_size; 1497 uint64_t gpu_addr; 1498 uint32_t autoload_fw_id[2]; 1499 1500 memset(autoload_fw_id, 0, sizeof(uint32_t) * 2); 1501 1502 /* RLC autoload sequence 2: copy ucode */ 1503 gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id); 1504 gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id); 1505 gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id); 1506 gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id); 1507 1508 rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset; 1509 rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size; 1510 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset; 1511 1512 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); 1513 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); 1514 1515 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); 1516 1517 /* RLC autoload sequence 3: load IMU fw */ 1518 if (adev->gfx.imu.funcs->load_microcode) 1519 adev->gfx.imu.funcs->load_microcode(adev); 1520 /* RLC autoload sequence 4 init IMU fw */ 1521 if (adev->gfx.imu.funcs->setup_imu) 1522 adev->gfx.imu.funcs->setup_imu(adev); 1523 if (adev->gfx.imu.funcs->start_imu) 1524 adev->gfx.imu.funcs->start_imu(adev); 1525 1526 /* RLC autoload sequence 5 disable gpa mode */ 1527 gfx_v11_0_disable_gpa_mode(adev); 1528 1529 return 0; 1530 } 1531 1532 static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) 1533 { 1534 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); 1535 uint32_t *ptr; 1536 uint32_t inst; 1537 1538 ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); 1539 if (!ptr) { 1540 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); 1541 adev->gfx.ip_dump_core = NULL; 1542 } else { 1543 adev->gfx.ip_dump_core = ptr; 1544 } 1545 1546 /* Allocate memory for compute queue registers for all the instances */ 1547 reg_count = ARRAY_SIZE(gc_cp_reg_list_11); 1548 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 1549 adev->gfx.mec.num_queue_per_pipe; 1550 1551 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1552 if (!ptr) { 1553 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); 1554 adev->gfx.ip_dump_compute_queues = NULL; 1555 } else { 1556 adev->gfx.ip_dump_compute_queues = ptr; 1557 } 1558 1559 /* Allocate memory for gfx queue registers for all the instances */ 1560 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 1561 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me * 1562 adev->gfx.me.num_queue_per_pipe; 1563 1564 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1565 if (!ptr) { 1566 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); 1567 adev->gfx.ip_dump_gfx_queues = NULL; 1568 } else { 1569 adev->gfx.ip_dump_gfx_queues = ptr; 1570 } 1571 } 1572 1573 static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) 1574 { 1575 int i, j, k, r, ring_id; 1576 int xcc_id = 0; 1577 struct amdgpu_device *adev = ip_block->adev; 1578 int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */ 1579 1580 INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler); 1581 1582 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1583 case IP_VERSION(11, 0, 0): 1584 case IP_VERSION(11, 0, 1): 1585 case IP_VERSION(11, 0, 2): 1586 case IP_VERSION(11, 0, 3): 1587 case IP_VERSION(11, 0, 4): 1588 case IP_VERSION(11, 5, 0): 1589 case IP_VERSION(11, 5, 1): 1590 case IP_VERSION(11, 5, 2): 1591 case IP_VERSION(11, 5, 3): 1592 adev->gfx.me.num_me = 1; 1593 adev->gfx.me.num_pipe_per_me = 1; 1594 adev->gfx.me.num_queue_per_pipe = 2; 1595 adev->gfx.mec.num_mec = 1; 1596 adev->gfx.mec.num_pipe_per_mec = 4; 1597 adev->gfx.mec.num_queue_per_pipe = 4; 1598 break; 1599 default: 1600 adev->gfx.me.num_me = 1; 1601 adev->gfx.me.num_pipe_per_me = 1; 1602 adev->gfx.me.num_queue_per_pipe = 1; 1603 adev->gfx.mec.num_mec = 1; 1604 adev->gfx.mec.num_pipe_per_mec = 4; 1605 adev->gfx.mec.num_queue_per_pipe = 8; 1606 break; 1607 } 1608 1609 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1610 case IP_VERSION(11, 0, 0): 1611 case IP_VERSION(11, 0, 2): 1612 case IP_VERSION(11, 0, 3): 1613 if (!adev->gfx.disable_uq && 1614 adev->gfx.me_fw_version >= 2420 && 1615 adev->gfx.pfp_fw_version >= 2580 && 1616 adev->gfx.mec_fw_version >= 2650 && 1617 adev->mes.fw_version[0] >= 120) { 1618 adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; 1619 adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; 1620 } 1621 break; 1622 case IP_VERSION(11, 0, 1): 1623 case IP_VERSION(11, 0, 4): 1624 case IP_VERSION(11, 5, 0): 1625 case IP_VERSION(11, 5, 1): 1626 case IP_VERSION(11, 5, 2): 1627 case IP_VERSION(11, 5, 3): 1628 /* add firmware version checks here */ 1629 if (0 && !adev->gfx.disable_uq) { 1630 adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; 1631 adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; 1632 } 1633 break; 1634 default: 1635 break; 1636 } 1637 1638 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1639 case IP_VERSION(11, 0, 0): 1640 case IP_VERSION(11, 0, 2): 1641 case IP_VERSION(11, 0, 3): 1642 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1643 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1644 if (adev->gfx.me_fw_version >= 2280 && 1645 adev->gfx.pfp_fw_version >= 2370 && 1646 adev->gfx.mec_fw_version >= 2450 && 1647 adev->mes.fw_version[0] >= 99) { 1648 adev->gfx.enable_cleaner_shader = true; 1649 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1650 if (r) { 1651 adev->gfx.enable_cleaner_shader = false; 1652 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1653 } 1654 } 1655 break; 1656 case IP_VERSION(11, 0, 1): 1657 case IP_VERSION(11, 0, 4): 1658 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1659 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1660 if (adev->gfx.pfp_fw_version >= 102 && 1661 adev->gfx.mec_fw_version >= 66 && 1662 adev->mes.fw_version[0] >= 128) { 1663 adev->gfx.enable_cleaner_shader = true; 1664 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1665 if (r) { 1666 adev->gfx.enable_cleaner_shader = false; 1667 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1668 } 1669 } 1670 break; 1671 case IP_VERSION(11, 5, 0): 1672 case IP_VERSION(11, 5, 1): 1673 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1674 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1675 if (adev->gfx.mec_fw_version >= 26 && 1676 adev->mes.fw_version[0] >= 114) { 1677 adev->gfx.enable_cleaner_shader = true; 1678 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1679 if (r) { 1680 adev->gfx.enable_cleaner_shader = false; 1681 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1682 } 1683 } 1684 break; 1685 case IP_VERSION(11, 5, 2): 1686 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1687 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1688 if (adev->gfx.me_fw_version >= 12 && 1689 adev->gfx.pfp_fw_version >= 15 && 1690 adev->gfx.mec_fw_version >= 15) { 1691 adev->gfx.enable_cleaner_shader = true; 1692 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1693 if (r) { 1694 adev->gfx.enable_cleaner_shader = false; 1695 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1696 } 1697 } 1698 break; 1699 case IP_VERSION(11, 5, 3): 1700 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1701 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1702 if (adev->gfx.me_fw_version >= 7 && 1703 adev->gfx.pfp_fw_version >= 8 && 1704 adev->gfx.mec_fw_version >= 8) { 1705 adev->gfx.enable_cleaner_shader = true; 1706 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1707 if (r) { 1708 adev->gfx.enable_cleaner_shader = false; 1709 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1710 } 1711 } 1712 break; 1713 default: 1714 adev->gfx.enable_cleaner_shader = false; 1715 break; 1716 } 1717 1718 /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ 1719 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && 1720 amdgpu_sriov_is_pp_one_vf(adev)) 1721 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG; 1722 1723 /* EOP Event */ 1724 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1725 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1726 &adev->gfx.eop_irq); 1727 if (r) 1728 return r; 1729 1730 /* Bad opcode Event */ 1731 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1732 GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, 1733 &adev->gfx.bad_op_irq); 1734 if (r) 1735 return r; 1736 1737 /* Privileged reg */ 1738 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1739 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1740 &adev->gfx.priv_reg_irq); 1741 if (r) 1742 return r; 1743 1744 /* Privileged inst */ 1745 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1746 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1747 &adev->gfx.priv_inst_irq); 1748 if (r) 1749 return r; 1750 1751 /* FED error */ 1752 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, 1753 GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, 1754 &adev->gfx.rlc_gc_fed_irq); 1755 if (r) 1756 return r; 1757 1758 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1759 1760 gfx_v11_0_me_init(adev); 1761 1762 r = gfx_v11_0_rlc_init(adev); 1763 if (r) { 1764 DRM_ERROR("Failed to init rlc BOs!\n"); 1765 return r; 1766 } 1767 1768 r = gfx_v11_0_mec_init(adev); 1769 if (r) { 1770 DRM_ERROR("Failed to init MEC BOs!\n"); 1771 return r; 1772 } 1773 1774 if (adev->gfx.num_gfx_rings) { 1775 ring_id = 0; 1776 /* set up the gfx ring */ 1777 for (i = 0; i < adev->gfx.me.num_me; i++) { 1778 for (j = 0; j < num_queue_per_pipe; j++) { 1779 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1780 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1781 continue; 1782 1783 r = gfx_v11_0_gfx_ring_init(adev, ring_id, 1784 i, k, j); 1785 if (r) 1786 return r; 1787 ring_id++; 1788 } 1789 } 1790 } 1791 } 1792 1793 if (adev->gfx.num_compute_rings) { 1794 ring_id = 0; 1795 /* set up the compute queues - allocate horizontally across pipes */ 1796 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1797 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1798 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1799 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, 1800 k, j)) 1801 continue; 1802 1803 r = gfx_v11_0_compute_ring_init(adev, ring_id, 1804 i, k, j); 1805 if (r) 1806 return r; 1807 1808 ring_id++; 1809 } 1810 } 1811 } 1812 } 1813 1814 adev->gfx.gfx_supported_reset = 1815 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 1816 adev->gfx.compute_supported_reset = 1817 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 1818 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1819 case IP_VERSION(11, 0, 0): 1820 case IP_VERSION(11, 0, 2): 1821 case IP_VERSION(11, 0, 3): 1822 if ((adev->gfx.me_fw_version >= 2280) && 1823 (adev->gfx.mec_fw_version >= 2410) && 1824 !amdgpu_sriov_vf(adev) && 1825 !adev->debug_disable_gpu_ring_reset) { 1826 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1827 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1828 } 1829 break; 1830 default: 1831 if (!amdgpu_sriov_vf(adev) && 1832 !adev->debug_disable_gpu_ring_reset) { 1833 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1834 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1835 } 1836 break; 1837 } 1838 1839 if (!adev->enable_mes_kiq) { 1840 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); 1841 if (r) { 1842 DRM_ERROR("Failed to init KIQ BOs!\n"); 1843 return r; 1844 } 1845 1846 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1847 if (r) 1848 return r; 1849 } 1850 1851 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0); 1852 if (r) 1853 return r; 1854 1855 /* allocate visible FB for rlc auto-loading fw */ 1856 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1857 r = gfx_v11_0_rlc_autoload_buffer_init(adev); 1858 if (r) 1859 return r; 1860 } 1861 1862 r = gfx_v11_0_gpu_early_init(adev); 1863 if (r) 1864 return r; 1865 1866 if (amdgpu_gfx_ras_sw_init(adev)) { 1867 dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); 1868 return -EINVAL; 1869 } 1870 1871 gfx_v11_0_alloc_ip_dump(adev); 1872 1873 r = amdgpu_gfx_sysfs_init(adev); 1874 if (r) 1875 return r; 1876 1877 return 0; 1878 } 1879 1880 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev) 1881 { 1882 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1883 &adev->gfx.pfp.pfp_fw_gpu_addr, 1884 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1885 1886 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1887 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1888 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1889 } 1890 1891 static void gfx_v11_0_me_fini(struct amdgpu_device *adev) 1892 { 1893 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1894 &adev->gfx.me.me_fw_gpu_addr, 1895 (void **)&adev->gfx.me.me_fw_ptr); 1896 1897 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1898 &adev->gfx.me.me_fw_data_gpu_addr, 1899 (void **)&adev->gfx.me.me_fw_data_ptr); 1900 } 1901 1902 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1903 { 1904 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1905 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1906 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1907 } 1908 1909 static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) 1910 { 1911 int i; 1912 struct amdgpu_device *adev = ip_block->adev; 1913 1914 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1915 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1916 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1917 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1918 1919 amdgpu_gfx_mqd_sw_fini(adev, 0); 1920 1921 if (!adev->enable_mes_kiq) { 1922 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); 1923 amdgpu_gfx_kiq_fini(adev, 0); 1924 } 1925 1926 amdgpu_gfx_cleaner_shader_sw_fini(adev); 1927 1928 gfx_v11_0_pfp_fini(adev); 1929 gfx_v11_0_me_fini(adev); 1930 gfx_v11_0_rlc_fini(adev); 1931 gfx_v11_0_mec_fini(adev); 1932 1933 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1934 gfx_v11_0_rlc_autoload_buffer_fini(adev); 1935 1936 gfx_v11_0_free_microcode(adev); 1937 1938 amdgpu_gfx_sysfs_fini(adev); 1939 1940 kfree(adev->gfx.ip_dump_core); 1941 kfree(adev->gfx.ip_dump_compute_queues); 1942 kfree(adev->gfx.ip_dump_gfx_queues); 1943 1944 return 0; 1945 } 1946 1947 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1948 u32 sh_num, u32 instance, int xcc_id) 1949 { 1950 u32 data; 1951 1952 if (instance == 0xffffffff) 1953 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1954 INSTANCE_BROADCAST_WRITES, 1); 1955 else 1956 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1957 instance); 1958 1959 if (se_num == 0xffffffff) 1960 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1961 1); 1962 else 1963 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1964 1965 if (sh_num == 0xffffffff) 1966 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1967 1); 1968 else 1969 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1970 1971 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1972 } 1973 1974 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev) 1975 { 1976 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask; 1977 1978 gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE); 1979 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask, 1980 CC_GC_SA_UNIT_DISABLE, 1981 SA_DISABLE); 1982 gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE); 1983 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask, 1984 GC_USER_SA_UNIT_DISABLE, 1985 SA_DISABLE); 1986 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * 1987 adev->gfx.config.max_shader_engines); 1988 1989 return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask)); 1990 } 1991 1992 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1993 { 1994 u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask; 1995 u32 rb_mask; 1996 1997 gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1998 gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask, 1999 CC_RB_BACKEND_DISABLE, 2000 BACKEND_DISABLE); 2001 gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 2002 gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask, 2003 GC_USER_RB_BACKEND_DISABLE, 2004 BACKEND_DISABLE); 2005 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se * 2006 adev->gfx.config.max_shader_engines); 2007 2008 return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask)); 2009 } 2010 2011 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) 2012 { 2013 u32 rb_bitmap_per_sa; 2014 u32 rb_bitmap_width_per_sa; 2015 u32 max_sa; 2016 u32 active_sa_bitmap; 2017 u32 global_active_rb_bitmap; 2018 u32 active_rb_bitmap = 0; 2019 u32 i; 2020 2021 /* query sa bitmap from SA_UNIT_DISABLE registers */ 2022 active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev); 2023 /* query rb bitmap from RB_BACKEND_DISABLE registers */ 2024 global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev); 2025 2026 /* generate active rb bitmap according to active sa bitmap */ 2027 max_sa = adev->gfx.config.max_shader_engines * 2028 adev->gfx.config.max_sh_per_se; 2029 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se / 2030 adev->gfx.config.max_sh_per_se; 2031 rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa); 2032 2033 for (i = 0; i < max_sa; i++) { 2034 if (active_sa_bitmap & (1 << i)) 2035 active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa)); 2036 } 2037 2038 active_rb_bitmap &= global_active_rb_bitmap; 2039 adev->gfx.config.backend_enable_mask = active_rb_bitmap; 2040 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); 2041 } 2042 2043 #define DEFAULT_SH_MEM_BASES (0x6000) 2044 #define LDS_APP_BASE 0x1 2045 #define SCRATCH_APP_BASE 0x2 2046 2047 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) 2048 { 2049 int i; 2050 uint32_t sh_mem_bases; 2051 uint32_t data; 2052 2053 /* 2054 * Configure apertures: 2055 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 2056 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 2057 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 2058 */ 2059 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | 2060 SCRATCH_APP_BASE; 2061 2062 mutex_lock(&adev->srbm_mutex); 2063 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 2064 soc21_grbm_select(adev, 0, 0, 0, i); 2065 /* CP and shaders */ 2066 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 2067 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); 2068 2069 /* Enable trap for each kfd vmid. */ 2070 data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); 2071 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 2072 WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); 2073 } 2074 soc21_grbm_select(adev, 0, 0, 0, 0); 2075 mutex_unlock(&adev->srbm_mutex); 2076 2077 /* 2078 * Initialize all compute VMIDs to have no GDS, GWS, or OA 2079 * access. These should be enabled by FW for target VMIDs. 2080 */ 2081 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 2082 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); 2083 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); 2084 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); 2085 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); 2086 } 2087 } 2088 2089 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev) 2090 { 2091 int vmid; 2092 2093 /* 2094 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 2095 * access. Compute VMIDs should be enabled by FW for target VMIDs, 2096 * the driver can enable them for graphics. VMID0 should maintain 2097 * access so that HWS firmware can save/restore entries. 2098 */ 2099 for (vmid = 1; vmid < 16; vmid++) { 2100 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); 2101 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); 2102 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); 2103 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); 2104 } 2105 } 2106 2107 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev) 2108 { 2109 /* TODO: harvest feature to be added later. */ 2110 } 2111 2112 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev) 2113 { 2114 /* TCCs are global (not instanced). */ 2115 uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) | 2116 RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE); 2117 2118 adev->gfx.config.tcc_disabled_mask = 2119 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | 2120 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16); 2121 } 2122 2123 static void gfx_v11_0_constants_init(struct amdgpu_device *adev) 2124 { 2125 u32 tmp; 2126 int i; 2127 2128 if (!amdgpu_sriov_vf(adev)) 2129 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 2130 2131 gfx_v11_0_setup_rb(adev); 2132 gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info); 2133 gfx_v11_0_get_tcc_info(adev); 2134 adev->gfx.config.pa_sc_tile_steering_override = 0; 2135 2136 /* Set whether texture coordinate truncation is conformant. */ 2137 tmp = RREG32_SOC15(GC, 0, regTA_CNTL2); 2138 adev->gfx.config.ta_cntl2_truncate_coord_mode = 2139 REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE); 2140 2141 /* XXX SH_MEM regs */ 2142 /* where to put LDS, scratch, GPUVM in FSA64 space */ 2143 mutex_lock(&adev->srbm_mutex); 2144 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { 2145 soc21_grbm_select(adev, 0, 0, 0, i); 2146 /* CP and shaders */ 2147 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 2148 if (i != 0) { 2149 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 2150 (adev->gmc.private_aperture_start >> 48)); 2151 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 2152 (adev->gmc.shared_aperture_start >> 48)); 2153 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); 2154 } 2155 } 2156 soc21_grbm_select(adev, 0, 0, 0, 0); 2157 2158 mutex_unlock(&adev->srbm_mutex); 2159 2160 gfx_v11_0_init_compute_vmid(adev); 2161 gfx_v11_0_init_gds_vmid(adev); 2162 } 2163 2164 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev, 2165 int me, int pipe) 2166 { 2167 if (me != 0) 2168 return 0; 2169 2170 switch (pipe) { 2171 case 0: 2172 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 2173 case 1: 2174 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 2175 default: 2176 return 0; 2177 } 2178 } 2179 2180 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev, 2181 int me, int pipe) 2182 { 2183 /* 2184 * amdgpu controls only the first MEC. That's why this function only 2185 * handles the setting of interrupts for this specific MEC. All other 2186 * pipes' interrupts are set by amdkfd. 2187 */ 2188 if (me != 1) 2189 return 0; 2190 2191 switch (pipe) { 2192 case 0: 2193 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 2194 case 1: 2195 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 2196 case 2: 2197 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 2198 case 3: 2199 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 2200 default: 2201 return 0; 2202 } 2203 } 2204 2205 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 2206 bool enable) 2207 { 2208 u32 tmp, cp_int_cntl_reg; 2209 int i, j; 2210 2211 if (amdgpu_sriov_vf(adev)) 2212 return; 2213 2214 for (i = 0; i < adev->gfx.me.num_me; i++) { 2215 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 2216 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 2217 2218 if (cp_int_cntl_reg) { 2219 tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 2220 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 2221 enable ? 1 : 0); 2222 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 2223 enable ? 1 : 0); 2224 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 2225 enable ? 1 : 0); 2226 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 2227 enable ? 1 : 0); 2228 WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); 2229 } 2230 } 2231 } 2232 } 2233 2234 static int gfx_v11_0_init_csb(struct amdgpu_device *adev) 2235 { 2236 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 2237 2238 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 2239 adev->gfx.rlc.clear_state_gpu_addr >> 32); 2240 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 2241 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 2242 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 2243 2244 return 0; 2245 } 2246 2247 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) 2248 { 2249 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 2250 2251 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 2252 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 2253 } 2254 2255 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) 2256 { 2257 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2258 udelay(50); 2259 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2260 udelay(50); 2261 } 2262 2263 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 2264 bool enable) 2265 { 2266 uint32_t rlc_pg_cntl; 2267 2268 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 2269 2270 if (!enable) { 2271 /* RLC_PG_CNTL[23] = 0 (default) 2272 * RLC will wait for handshake acks with SMU 2273 * GFXOFF will be enabled 2274 * RLC_PG_CNTL[23] = 1 2275 * RLC will not issue any message to SMU 2276 * hence no handshake between SMU & RLC 2277 * GFXOFF will be disabled 2278 */ 2279 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2280 } else 2281 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2282 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 2283 } 2284 2285 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) 2286 { 2287 /* TODO: enable rlc & smu handshake until smu 2288 * and gfxoff feature works as expected */ 2289 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 2290 gfx_v11_0_rlc_smu_handshake_cntl(adev, false); 2291 2292 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2293 udelay(50); 2294 } 2295 2296 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) 2297 { 2298 uint32_t tmp; 2299 2300 /* enable Save Restore Machine */ 2301 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 2302 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 2303 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 2304 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 2305 } 2306 2307 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) 2308 { 2309 const struct rlc_firmware_header_v2_0 *hdr; 2310 const __le32 *fw_data; 2311 unsigned i, fw_size; 2312 2313 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2314 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2315 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2316 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2317 2318 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 2319 RLCG_UCODE_LOADING_START_ADDRESS); 2320 2321 for (i = 0; i < fw_size; i++) 2322 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 2323 le32_to_cpup(fw_data++)); 2324 2325 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2326 } 2327 2328 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 2329 { 2330 const struct rlc_firmware_header_v2_2 *hdr; 2331 const __le32 *fw_data; 2332 unsigned i, fw_size; 2333 u32 tmp; 2334 2335 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 2336 2337 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2338 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 2339 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 2340 2341 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 2342 2343 for (i = 0; i < fw_size; i++) { 2344 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2345 msleep(1); 2346 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 2347 le32_to_cpup(fw_data++)); 2348 } 2349 2350 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2351 2352 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2353 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 2354 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 2355 2356 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 2357 for (i = 0; i < fw_size; i++) { 2358 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2359 msleep(1); 2360 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 2361 le32_to_cpup(fw_data++)); 2362 } 2363 2364 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2365 2366 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 2367 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 2368 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 2369 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 2370 } 2371 2372 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) 2373 { 2374 const struct rlc_firmware_header_v2_3 *hdr; 2375 const __le32 *fw_data; 2376 unsigned i, fw_size; 2377 u32 tmp; 2378 2379 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 2380 2381 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2382 le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); 2383 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; 2384 2385 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); 2386 2387 for (i = 0; i < fw_size; i++) { 2388 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2389 msleep(1); 2390 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, 2391 le32_to_cpup(fw_data++)); 2392 } 2393 2394 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); 2395 2396 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 2397 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 2398 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); 2399 2400 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2401 le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); 2402 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; 2403 2404 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); 2405 2406 for (i = 0; i < fw_size; i++) { 2407 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2408 msleep(1); 2409 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, 2410 le32_to_cpup(fw_data++)); 2411 } 2412 2413 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); 2414 2415 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); 2416 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); 2417 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); 2418 } 2419 2420 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) 2421 { 2422 const struct rlc_firmware_header_v2_0 *hdr; 2423 uint16_t version_major; 2424 uint16_t version_minor; 2425 2426 if (!adev->gfx.rlc_fw) 2427 return -EINVAL; 2428 2429 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2430 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2431 2432 version_major = le16_to_cpu(hdr->header.header_version_major); 2433 version_minor = le16_to_cpu(hdr->header.header_version_minor); 2434 2435 if (version_major == 2) { 2436 gfx_v11_0_load_rlcg_microcode(adev); 2437 if (amdgpu_dpm == 1) { 2438 if (version_minor >= 2) 2439 gfx_v11_0_load_rlc_iram_dram_microcode(adev); 2440 if (version_minor == 3) 2441 gfx_v11_0_load_rlcp_rlcv_microcode(adev); 2442 } 2443 2444 return 0; 2445 } 2446 2447 return -EINVAL; 2448 } 2449 2450 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev) 2451 { 2452 int r; 2453 2454 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2455 gfx_v11_0_init_csb(adev); 2456 2457 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 2458 gfx_v11_0_rlc_enable_srm(adev); 2459 } else { 2460 if (amdgpu_sriov_vf(adev)) { 2461 gfx_v11_0_init_csb(adev); 2462 return 0; 2463 } 2464 2465 adev->gfx.rlc.funcs->stop(adev); 2466 2467 /* disable CG */ 2468 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 2469 2470 /* disable PG */ 2471 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 2472 2473 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2474 /* legacy rlc firmware loading */ 2475 r = gfx_v11_0_rlc_load_microcode(adev); 2476 if (r) 2477 return r; 2478 } 2479 2480 gfx_v11_0_init_csb(adev); 2481 2482 adev->gfx.rlc.funcs->start(adev); 2483 } 2484 return 0; 2485 } 2486 2487 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) 2488 { 2489 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2490 uint32_t tmp; 2491 int i; 2492 2493 /* Trigger an invalidation of the L1 instruction caches */ 2494 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2495 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2496 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2497 2498 /* Wait for invalidation complete */ 2499 for (i = 0; i < usec_timeout; i++) { 2500 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2501 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2502 INVALIDATE_CACHE_COMPLETE)) 2503 break; 2504 udelay(1); 2505 } 2506 2507 if (i >= usec_timeout) { 2508 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2509 return -EINVAL; 2510 } 2511 2512 if (amdgpu_emu_mode == 1) 2513 amdgpu_device_flush_hdp(adev, NULL); 2514 2515 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2516 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2517 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2518 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2519 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2520 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2521 2522 /* Program me ucode address into intruction cache address register */ 2523 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2524 lower_32_bits(addr) & 0xFFFFF000); 2525 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2526 upper_32_bits(addr)); 2527 2528 return 0; 2529 } 2530 2531 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) 2532 { 2533 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2534 uint32_t tmp; 2535 int i; 2536 2537 /* Trigger an invalidation of the L1 instruction caches */ 2538 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2539 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2540 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2541 2542 /* Wait for invalidation complete */ 2543 for (i = 0; i < usec_timeout; i++) { 2544 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2545 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2546 INVALIDATE_CACHE_COMPLETE)) 2547 break; 2548 udelay(1); 2549 } 2550 2551 if (i >= usec_timeout) { 2552 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2553 return -EINVAL; 2554 } 2555 2556 if (amdgpu_emu_mode == 1) 2557 amdgpu_device_flush_hdp(adev, NULL); 2558 2559 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2560 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2561 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2562 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2563 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2564 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2565 2566 /* Program pfp ucode address into intruction cache address register */ 2567 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2568 lower_32_bits(addr) & 0xFFFFF000); 2569 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2570 upper_32_bits(addr)); 2571 2572 return 0; 2573 } 2574 2575 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) 2576 { 2577 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2578 uint32_t tmp; 2579 int i; 2580 2581 /* Trigger an invalidation of the L1 instruction caches */ 2582 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2583 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2584 2585 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2586 2587 /* Wait for invalidation complete */ 2588 for (i = 0; i < usec_timeout; i++) { 2589 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2590 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2591 INVALIDATE_CACHE_COMPLETE)) 2592 break; 2593 udelay(1); 2594 } 2595 2596 if (i >= usec_timeout) { 2597 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2598 return -EINVAL; 2599 } 2600 2601 if (amdgpu_emu_mode == 1) 2602 amdgpu_device_flush_hdp(adev, NULL); 2603 2604 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2605 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2606 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2607 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2608 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2609 2610 /* Program mec1 ucode address into intruction cache address register */ 2611 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2612 lower_32_bits(addr) & 0xFFFFF000); 2613 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2614 upper_32_bits(addr)); 2615 2616 return 0; 2617 } 2618 2619 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2620 { 2621 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2622 uint32_t tmp; 2623 unsigned i, pipe_id; 2624 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2625 2626 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2627 adev->gfx.pfp_fw->data; 2628 2629 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2630 lower_32_bits(addr)); 2631 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2632 upper_32_bits(addr)); 2633 2634 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2635 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2636 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2637 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2638 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2639 2640 /* 2641 * Programming any of the CP_PFP_IC_BASE registers 2642 * forces invalidation of the ME L1 I$. Wait for the 2643 * invalidation complete 2644 */ 2645 for (i = 0; i < usec_timeout; i++) { 2646 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2647 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2648 INVALIDATE_CACHE_COMPLETE)) 2649 break; 2650 udelay(1); 2651 } 2652 2653 if (i >= usec_timeout) { 2654 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2655 return -EINVAL; 2656 } 2657 2658 /* Prime the L1 instruction caches */ 2659 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2660 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2661 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2662 /* Waiting for cache primed*/ 2663 for (i = 0; i < usec_timeout; i++) { 2664 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2665 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2666 ICACHE_PRIMED)) 2667 break; 2668 udelay(1); 2669 } 2670 2671 if (i >= usec_timeout) { 2672 dev_err(adev->dev, "failed to prime instruction cache\n"); 2673 return -EINVAL; 2674 } 2675 2676 mutex_lock(&adev->srbm_mutex); 2677 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2678 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2679 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2680 (pfp_hdr->ucode_start_addr_hi << 30) | 2681 (pfp_hdr->ucode_start_addr_lo >> 2)); 2682 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2683 pfp_hdr->ucode_start_addr_hi >> 2); 2684 2685 /* 2686 * Program CP_ME_CNTL to reset given PIPE to take 2687 * effect of CP_PFP_PRGRM_CNTR_START. 2688 */ 2689 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2690 if (pipe_id == 0) 2691 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2692 PFP_PIPE0_RESET, 1); 2693 else 2694 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2695 PFP_PIPE1_RESET, 1); 2696 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2697 2698 /* Clear pfp pipe0 reset bit. */ 2699 if (pipe_id == 0) 2700 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2701 PFP_PIPE0_RESET, 0); 2702 else 2703 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2704 PFP_PIPE1_RESET, 0); 2705 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2706 2707 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2708 lower_32_bits(addr2)); 2709 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2710 upper_32_bits(addr2)); 2711 } 2712 soc21_grbm_select(adev, 0, 0, 0, 0); 2713 mutex_unlock(&adev->srbm_mutex); 2714 2715 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2716 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2717 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2718 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2719 2720 /* Invalidate the data caches */ 2721 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2722 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2723 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2724 2725 for (i = 0; i < usec_timeout; i++) { 2726 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2727 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2728 INVALIDATE_DCACHE_COMPLETE)) 2729 break; 2730 udelay(1); 2731 } 2732 2733 if (i >= usec_timeout) { 2734 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2735 return -EINVAL; 2736 } 2737 2738 return 0; 2739 } 2740 2741 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2742 { 2743 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2744 uint32_t tmp; 2745 unsigned i, pipe_id; 2746 const struct gfx_firmware_header_v2_0 *me_hdr; 2747 2748 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2749 adev->gfx.me_fw->data; 2750 2751 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2752 lower_32_bits(addr)); 2753 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2754 upper_32_bits(addr)); 2755 2756 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2757 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2758 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2759 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2760 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2761 2762 /* 2763 * Programming any of the CP_ME_IC_BASE registers 2764 * forces invalidation of the ME L1 I$. Wait for the 2765 * invalidation complete 2766 */ 2767 for (i = 0; i < usec_timeout; i++) { 2768 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2769 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2770 INVALIDATE_CACHE_COMPLETE)) 2771 break; 2772 udelay(1); 2773 } 2774 2775 if (i >= usec_timeout) { 2776 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2777 return -EINVAL; 2778 } 2779 2780 /* Prime the instruction caches */ 2781 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2782 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2783 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2784 2785 /* Waiting for instruction cache primed*/ 2786 for (i = 0; i < usec_timeout; i++) { 2787 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2788 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2789 ICACHE_PRIMED)) 2790 break; 2791 udelay(1); 2792 } 2793 2794 if (i >= usec_timeout) { 2795 dev_err(adev->dev, "failed to prime instruction cache\n"); 2796 return -EINVAL; 2797 } 2798 2799 mutex_lock(&adev->srbm_mutex); 2800 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2801 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2802 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2803 (me_hdr->ucode_start_addr_hi << 30) | 2804 (me_hdr->ucode_start_addr_lo >> 2) ); 2805 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2806 me_hdr->ucode_start_addr_hi>>2); 2807 2808 /* 2809 * Program CP_ME_CNTL to reset given PIPE to take 2810 * effect of CP_PFP_PRGRM_CNTR_START. 2811 */ 2812 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2813 if (pipe_id == 0) 2814 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2815 ME_PIPE0_RESET, 1); 2816 else 2817 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2818 ME_PIPE1_RESET, 1); 2819 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2820 2821 /* Clear pfp pipe0 reset bit. */ 2822 if (pipe_id == 0) 2823 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2824 ME_PIPE0_RESET, 0); 2825 else 2826 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2827 ME_PIPE1_RESET, 0); 2828 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2829 2830 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2831 lower_32_bits(addr2)); 2832 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2833 upper_32_bits(addr2)); 2834 } 2835 soc21_grbm_select(adev, 0, 0, 0, 0); 2836 mutex_unlock(&adev->srbm_mutex); 2837 2838 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2839 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2840 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2841 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2842 2843 /* Invalidate the data caches */ 2844 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2845 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2846 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2847 2848 for (i = 0; i < usec_timeout; i++) { 2849 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2850 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2851 INVALIDATE_DCACHE_COMPLETE)) 2852 break; 2853 udelay(1); 2854 } 2855 2856 if (i >= usec_timeout) { 2857 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2858 return -EINVAL; 2859 } 2860 2861 return 0; 2862 } 2863 2864 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2865 { 2866 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2867 uint32_t tmp; 2868 unsigned i; 2869 const struct gfx_firmware_header_v2_0 *mec_hdr; 2870 2871 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2872 adev->gfx.mec_fw->data; 2873 2874 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2875 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2876 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2877 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2878 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2879 2880 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2881 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2882 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2883 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2884 2885 mutex_lock(&adev->srbm_mutex); 2886 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2887 soc21_grbm_select(adev, 1, i, 0, 0); 2888 2889 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); 2890 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2891 upper_32_bits(addr2)); 2892 2893 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2894 mec_hdr->ucode_start_addr_lo >> 2 | 2895 mec_hdr->ucode_start_addr_hi << 30); 2896 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2897 mec_hdr->ucode_start_addr_hi >> 2); 2898 2899 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); 2900 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2901 upper_32_bits(addr)); 2902 } 2903 mutex_unlock(&adev->srbm_mutex); 2904 soc21_grbm_select(adev, 0, 0, 0, 0); 2905 2906 /* Trigger an invalidation of the L1 instruction caches */ 2907 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2908 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2909 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2910 2911 /* Wait for invalidation complete */ 2912 for (i = 0; i < usec_timeout; i++) { 2913 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2914 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2915 INVALIDATE_DCACHE_COMPLETE)) 2916 break; 2917 udelay(1); 2918 } 2919 2920 if (i >= usec_timeout) { 2921 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2922 return -EINVAL; 2923 } 2924 2925 /* Trigger an invalidation of the L1 instruction caches */ 2926 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2927 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2928 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2929 2930 /* Wait for invalidation complete */ 2931 for (i = 0; i < usec_timeout; i++) { 2932 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2933 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2934 INVALIDATE_CACHE_COMPLETE)) 2935 break; 2936 udelay(1); 2937 } 2938 2939 if (i >= usec_timeout) { 2940 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2941 return -EINVAL; 2942 } 2943 2944 return 0; 2945 } 2946 2947 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) 2948 { 2949 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2950 const struct gfx_firmware_header_v2_0 *me_hdr; 2951 const struct gfx_firmware_header_v2_0 *mec_hdr; 2952 uint32_t pipe_id, tmp; 2953 2954 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2955 adev->gfx.mec_fw->data; 2956 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2957 adev->gfx.me_fw->data; 2958 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2959 adev->gfx.pfp_fw->data; 2960 2961 /* config pfp program start addr */ 2962 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2963 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2964 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2965 (pfp_hdr->ucode_start_addr_hi << 30) | 2966 (pfp_hdr->ucode_start_addr_lo >> 2)); 2967 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2968 pfp_hdr->ucode_start_addr_hi >> 2); 2969 } 2970 soc21_grbm_select(adev, 0, 0, 0, 0); 2971 2972 /* reset pfp pipe */ 2973 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2974 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2975 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2976 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2977 2978 /* clear pfp pipe reset */ 2979 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2980 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2981 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2982 2983 /* config me program start addr */ 2984 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2985 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2986 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2987 (me_hdr->ucode_start_addr_hi << 30) | 2988 (me_hdr->ucode_start_addr_lo >> 2) ); 2989 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2990 me_hdr->ucode_start_addr_hi>>2); 2991 } 2992 soc21_grbm_select(adev, 0, 0, 0, 0); 2993 2994 /* reset me pipe */ 2995 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2996 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2997 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2998 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2999 3000 /* clear me pipe reset */ 3001 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 3002 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 3003 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3004 3005 /* config mec program start addr */ 3006 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 3007 soc21_grbm_select(adev, 1, pipe_id, 0, 0); 3008 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 3009 mec_hdr->ucode_start_addr_lo >> 2 | 3010 mec_hdr->ucode_start_addr_hi << 30); 3011 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 3012 mec_hdr->ucode_start_addr_hi >> 2); 3013 } 3014 soc21_grbm_select(adev, 0, 0, 0, 0); 3015 3016 /* reset mec pipe */ 3017 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 3018 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 3019 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 3020 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 3021 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 3022 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 3023 3024 /* clear mec pipe reset */ 3025 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 3026 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 3027 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 3028 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 3029 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 3030 } 3031 3032 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 3033 { 3034 uint32_t cp_status; 3035 uint32_t bootload_status; 3036 int i, r; 3037 uint64_t addr, addr2; 3038 3039 for (i = 0; i < adev->usec_timeout; i++) { 3040 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 3041 3042 if (amdgpu_ip_version(adev, GC_HWIP, 0) == 3043 IP_VERSION(11, 0, 1) || 3044 amdgpu_ip_version(adev, GC_HWIP, 0) == 3045 IP_VERSION(11, 0, 4) || 3046 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) || 3047 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) || 3048 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) || 3049 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3)) 3050 bootload_status = RREG32_SOC15(GC, 0, 3051 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 3052 else 3053 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 3054 3055 if ((cp_status == 0) && 3056 (REG_GET_FIELD(bootload_status, 3057 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 3058 break; 3059 } 3060 udelay(1); 3061 } 3062 3063 if (i >= adev->usec_timeout) { 3064 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 3065 return -ETIMEDOUT; 3066 } 3067 3068 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 3069 if (adev->gfx.rs64_enable) { 3070 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3071 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; 3072 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 3073 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; 3074 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); 3075 if (r) 3076 return r; 3077 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3078 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; 3079 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 3080 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; 3081 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); 3082 if (r) 3083 return r; 3084 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3085 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; 3086 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 3087 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; 3088 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); 3089 if (r) 3090 return r; 3091 } else { 3092 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3093 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; 3094 r = gfx_v11_0_config_me_cache(adev, addr); 3095 if (r) 3096 return r; 3097 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3098 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; 3099 r = gfx_v11_0_config_pfp_cache(adev, addr); 3100 if (r) 3101 return r; 3102 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3103 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; 3104 r = gfx_v11_0_config_mec_cache(adev, addr); 3105 if (r) 3106 return r; 3107 } 3108 } 3109 3110 return 0; 3111 } 3112 3113 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 3114 { 3115 int i; 3116 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3117 3118 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 3119 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 3120 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3121 3122 for (i = 0; i < adev->usec_timeout; i++) { 3123 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 3124 break; 3125 udelay(1); 3126 } 3127 3128 if (i >= adev->usec_timeout) 3129 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); 3130 3131 return 0; 3132 } 3133 3134 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 3135 { 3136 int r; 3137 const struct gfx_firmware_header_v1_0 *pfp_hdr; 3138 const __le32 *fw_data; 3139 unsigned i, fw_size; 3140 3141 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 3142 adev->gfx.pfp_fw->data; 3143 3144 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 3145 3146 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 3147 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 3148 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 3149 3150 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 3151 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3152 &adev->gfx.pfp.pfp_fw_obj, 3153 &adev->gfx.pfp.pfp_fw_gpu_addr, 3154 (void **)&adev->gfx.pfp.pfp_fw_ptr); 3155 if (r) { 3156 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 3157 gfx_v11_0_pfp_fini(adev); 3158 return r; 3159 } 3160 3161 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 3162 3163 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 3164 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 3165 3166 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); 3167 3168 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); 3169 3170 for (i = 0; i < pfp_hdr->jt_size; i++) 3171 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, 3172 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); 3173 3174 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 3175 3176 return 0; 3177 } 3178 3179 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 3180 { 3181 int r; 3182 const struct gfx_firmware_header_v2_0 *pfp_hdr; 3183 const __le32 *fw_ucode, *fw_data; 3184 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3185 uint32_t tmp; 3186 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3187 3188 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 3189 adev->gfx.pfp_fw->data; 3190 3191 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 3192 3193 /* instruction */ 3194 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 3195 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 3196 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 3197 /* data */ 3198 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 3199 le32_to_cpu(pfp_hdr->data_offset_bytes)); 3200 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 3201 3202 /* 64kb align */ 3203 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3204 64 * 1024, 3205 AMDGPU_GEM_DOMAIN_VRAM | 3206 AMDGPU_GEM_DOMAIN_GTT, 3207 &adev->gfx.pfp.pfp_fw_obj, 3208 &adev->gfx.pfp.pfp_fw_gpu_addr, 3209 (void **)&adev->gfx.pfp.pfp_fw_ptr); 3210 if (r) { 3211 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 3212 gfx_v11_0_pfp_fini(adev); 3213 return r; 3214 } 3215 3216 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3217 64 * 1024, 3218 AMDGPU_GEM_DOMAIN_VRAM | 3219 AMDGPU_GEM_DOMAIN_GTT, 3220 &adev->gfx.pfp.pfp_fw_data_obj, 3221 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 3222 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 3223 if (r) { 3224 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 3225 gfx_v11_0_pfp_fini(adev); 3226 return r; 3227 } 3228 3229 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 3230 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 3231 3232 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 3233 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 3234 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 3235 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 3236 3237 if (amdgpu_emu_mode == 1) 3238 amdgpu_device_flush_hdp(adev, NULL); 3239 3240 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 3241 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 3242 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 3243 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 3244 3245 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 3246 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 3247 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 3248 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 3249 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 3250 3251 /* 3252 * Programming any of the CP_PFP_IC_BASE registers 3253 * forces invalidation of the ME L1 I$. Wait for the 3254 * invalidation complete 3255 */ 3256 for (i = 0; i < usec_timeout; i++) { 3257 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 3258 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 3259 INVALIDATE_CACHE_COMPLETE)) 3260 break; 3261 udelay(1); 3262 } 3263 3264 if (i >= usec_timeout) { 3265 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3266 return -EINVAL; 3267 } 3268 3269 /* Prime the L1 instruction caches */ 3270 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 3271 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 3272 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 3273 /* Waiting for cache primed*/ 3274 for (i = 0; i < usec_timeout; i++) { 3275 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 3276 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 3277 ICACHE_PRIMED)) 3278 break; 3279 udelay(1); 3280 } 3281 3282 if (i >= usec_timeout) { 3283 dev_err(adev->dev, "failed to prime instruction cache\n"); 3284 return -EINVAL; 3285 } 3286 3287 mutex_lock(&adev->srbm_mutex); 3288 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3289 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3290 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 3291 (pfp_hdr->ucode_start_addr_hi << 30) | 3292 (pfp_hdr->ucode_start_addr_lo >> 2) ); 3293 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 3294 pfp_hdr->ucode_start_addr_hi>>2); 3295 3296 /* 3297 * Program CP_ME_CNTL to reset given PIPE to take 3298 * effect of CP_PFP_PRGRM_CNTR_START. 3299 */ 3300 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3301 if (pipe_id == 0) 3302 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3303 PFP_PIPE0_RESET, 1); 3304 else 3305 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3306 PFP_PIPE1_RESET, 1); 3307 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3308 3309 /* Clear pfp pipe0 reset bit. */ 3310 if (pipe_id == 0) 3311 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3312 PFP_PIPE0_RESET, 0); 3313 else 3314 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3315 PFP_PIPE1_RESET, 0); 3316 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3317 3318 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 3319 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3320 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 3321 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3322 } 3323 soc21_grbm_select(adev, 0, 0, 0, 0); 3324 mutex_unlock(&adev->srbm_mutex); 3325 3326 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3327 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3328 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3329 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3330 3331 /* Invalidate the data caches */ 3332 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3333 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3334 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3335 3336 for (i = 0; i < usec_timeout; i++) { 3337 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3338 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3339 INVALIDATE_DCACHE_COMPLETE)) 3340 break; 3341 udelay(1); 3342 } 3343 3344 if (i >= usec_timeout) { 3345 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3346 return -EINVAL; 3347 } 3348 3349 return 0; 3350 } 3351 3352 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) 3353 { 3354 int r; 3355 const struct gfx_firmware_header_v1_0 *me_hdr; 3356 const __le32 *fw_data; 3357 unsigned i, fw_size; 3358 3359 me_hdr = (const struct gfx_firmware_header_v1_0 *) 3360 adev->gfx.me_fw->data; 3361 3362 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3363 3364 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3365 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 3366 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); 3367 3368 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, 3369 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3370 &adev->gfx.me.me_fw_obj, 3371 &adev->gfx.me.me_fw_gpu_addr, 3372 (void **)&adev->gfx.me.me_fw_ptr); 3373 if (r) { 3374 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); 3375 gfx_v11_0_me_fini(adev); 3376 return r; 3377 } 3378 3379 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); 3380 3381 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3382 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3383 3384 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr); 3385 3386 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0); 3387 3388 for (i = 0; i < me_hdr->jt_size; i++) 3389 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA, 3390 le32_to_cpup(fw_data + me_hdr->jt_offset + i)); 3391 3392 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); 3393 3394 return 0; 3395 } 3396 3397 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 3398 { 3399 int r; 3400 const struct gfx_firmware_header_v2_0 *me_hdr; 3401 const __le32 *fw_ucode, *fw_data; 3402 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3403 uint32_t tmp; 3404 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3405 3406 me_hdr = (const struct gfx_firmware_header_v2_0 *) 3407 adev->gfx.me_fw->data; 3408 3409 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3410 3411 /* instruction */ 3412 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 3413 le32_to_cpu(me_hdr->ucode_offset_bytes)); 3414 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 3415 /* data */ 3416 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3417 le32_to_cpu(me_hdr->data_offset_bytes)); 3418 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 3419 3420 /* 64kb align*/ 3421 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3422 64 * 1024, 3423 AMDGPU_GEM_DOMAIN_VRAM | 3424 AMDGPU_GEM_DOMAIN_GTT, 3425 &adev->gfx.me.me_fw_obj, 3426 &adev->gfx.me.me_fw_gpu_addr, 3427 (void **)&adev->gfx.me.me_fw_ptr); 3428 if (r) { 3429 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 3430 gfx_v11_0_me_fini(adev); 3431 return r; 3432 } 3433 3434 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3435 64 * 1024, 3436 AMDGPU_GEM_DOMAIN_VRAM | 3437 AMDGPU_GEM_DOMAIN_GTT, 3438 &adev->gfx.me.me_fw_data_obj, 3439 &adev->gfx.me.me_fw_data_gpu_addr, 3440 (void **)&adev->gfx.me.me_fw_data_ptr); 3441 if (r) { 3442 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 3443 gfx_v11_0_pfp_fini(adev); 3444 return r; 3445 } 3446 3447 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 3448 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 3449 3450 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3451 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 3452 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3453 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 3454 3455 if (amdgpu_emu_mode == 1) 3456 amdgpu_device_flush_hdp(adev, NULL); 3457 3458 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 3459 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3460 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 3461 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3462 3463 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 3464 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 3465 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 3466 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 3467 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 3468 3469 /* 3470 * Programming any of the CP_ME_IC_BASE registers 3471 * forces invalidation of the ME L1 I$. Wait for the 3472 * invalidation complete 3473 */ 3474 for (i = 0; i < usec_timeout; i++) { 3475 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3476 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3477 INVALIDATE_CACHE_COMPLETE)) 3478 break; 3479 udelay(1); 3480 } 3481 3482 if (i >= usec_timeout) { 3483 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3484 return -EINVAL; 3485 } 3486 3487 /* Prime the instruction caches */ 3488 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3489 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 3490 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 3491 3492 /* Waiting for instruction cache primed*/ 3493 for (i = 0; i < usec_timeout; i++) { 3494 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3495 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3496 ICACHE_PRIMED)) 3497 break; 3498 udelay(1); 3499 } 3500 3501 if (i >= usec_timeout) { 3502 dev_err(adev->dev, "failed to prime instruction cache\n"); 3503 return -EINVAL; 3504 } 3505 3506 mutex_lock(&adev->srbm_mutex); 3507 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3508 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3509 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 3510 (me_hdr->ucode_start_addr_hi << 30) | 3511 (me_hdr->ucode_start_addr_lo >> 2) ); 3512 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 3513 me_hdr->ucode_start_addr_hi>>2); 3514 3515 /* 3516 * Program CP_ME_CNTL to reset given PIPE to take 3517 * effect of CP_PFP_PRGRM_CNTR_START. 3518 */ 3519 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3520 if (pipe_id == 0) 3521 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3522 ME_PIPE0_RESET, 1); 3523 else 3524 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3525 ME_PIPE1_RESET, 1); 3526 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3527 3528 /* Clear pfp pipe0 reset bit. */ 3529 if (pipe_id == 0) 3530 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3531 ME_PIPE0_RESET, 0); 3532 else 3533 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3534 ME_PIPE1_RESET, 0); 3535 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3536 3537 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 3538 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3539 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 3540 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3541 } 3542 soc21_grbm_select(adev, 0, 0, 0, 0); 3543 mutex_unlock(&adev->srbm_mutex); 3544 3545 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3546 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3547 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3548 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3549 3550 /* Invalidate the data caches */ 3551 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3552 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3553 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3554 3555 for (i = 0; i < usec_timeout; i++) { 3556 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3557 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3558 INVALIDATE_DCACHE_COMPLETE)) 3559 break; 3560 udelay(1); 3561 } 3562 3563 if (i >= usec_timeout) { 3564 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3565 return -EINVAL; 3566 } 3567 3568 return 0; 3569 } 3570 3571 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 3572 { 3573 int r; 3574 3575 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 3576 return -EINVAL; 3577 3578 gfx_v11_0_cp_gfx_enable(adev, false); 3579 3580 if (adev->gfx.rs64_enable) 3581 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev); 3582 else 3583 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev); 3584 if (r) { 3585 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 3586 return r; 3587 } 3588 3589 if (adev->gfx.rs64_enable) 3590 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev); 3591 else 3592 r = gfx_v11_0_cp_gfx_load_me_microcode(adev); 3593 if (r) { 3594 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 3595 return r; 3596 } 3597 3598 return 0; 3599 } 3600 3601 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev) 3602 { 3603 struct amdgpu_ring *ring; 3604 const struct cs_section_def *sect = NULL; 3605 const struct cs_extent_def *ext = NULL; 3606 int r, i; 3607 int ctx_reg_offset; 3608 3609 /* init the CP */ 3610 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 3611 adev->gfx.config.max_hw_contexts - 1); 3612 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 3613 3614 if (!amdgpu_async_gfx_ring) 3615 gfx_v11_0_cp_gfx_enable(adev, true); 3616 3617 ring = &adev->gfx.gfx_ring[0]; 3618 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); 3619 if (r) { 3620 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3621 return r; 3622 } 3623 3624 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3625 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 3626 3627 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 3628 amdgpu_ring_write(ring, 0x80000000); 3629 amdgpu_ring_write(ring, 0x80000000); 3630 3631 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 3632 for (ext = sect->section; ext->extent != NULL; ++ext) { 3633 if (sect->id == SECT_CONTEXT) { 3634 amdgpu_ring_write(ring, 3635 PACKET3(PACKET3_SET_CONTEXT_REG, 3636 ext->reg_count)); 3637 amdgpu_ring_write(ring, ext->reg_index - 3638 PACKET3_SET_CONTEXT_REG_START); 3639 for (i = 0; i < ext->reg_count; i++) 3640 amdgpu_ring_write(ring, ext->extent[i]); 3641 } 3642 } 3643 } 3644 3645 ctx_reg_offset = 3646 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 3647 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 3648 amdgpu_ring_write(ring, ctx_reg_offset); 3649 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 3650 3651 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3652 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 3653 3654 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3655 amdgpu_ring_write(ring, 0); 3656 3657 amdgpu_ring_commit(ring); 3658 3659 /* submit cs packet to copy state 0 to next available state */ 3660 if (adev->gfx.num_gfx_rings > 1) { 3661 /* maximum supported gfx ring is 2 */ 3662 ring = &adev->gfx.gfx_ring[1]; 3663 r = amdgpu_ring_alloc(ring, 2); 3664 if (r) { 3665 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3666 return r; 3667 } 3668 3669 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3670 amdgpu_ring_write(ring, 0); 3671 3672 amdgpu_ring_commit(ring); 3673 } 3674 return 0; 3675 } 3676 3677 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 3678 CP_PIPE_ID pipe) 3679 { 3680 u32 tmp; 3681 3682 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 3683 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 3684 3685 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 3686 } 3687 3688 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 3689 struct amdgpu_ring *ring) 3690 { 3691 u32 tmp; 3692 3693 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3694 if (ring->use_doorbell) { 3695 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3696 DOORBELL_OFFSET, ring->doorbell_index); 3697 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3698 DOORBELL_EN, 1); 3699 } else { 3700 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3701 DOORBELL_EN, 0); 3702 } 3703 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 3704 3705 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 3706 DOORBELL_RANGE_LOWER, ring->doorbell_index); 3707 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 3708 3709 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3710 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 3711 } 3712 3713 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) 3714 { 3715 struct amdgpu_ring *ring; 3716 u32 tmp; 3717 u32 rb_bufsz; 3718 u64 rb_addr, rptr_addr, wptr_gpu_addr; 3719 3720 /* Set the write pointer delay */ 3721 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 3722 3723 /* set the RB to use vmid 0 */ 3724 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 3725 3726 /* Init gfx ring 0 for pipe 0 */ 3727 mutex_lock(&adev->srbm_mutex); 3728 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3729 3730 /* Set ring buffer size */ 3731 ring = &adev->gfx.gfx_ring[0]; 3732 rb_bufsz = order_base_2(ring->ring_size / 8); 3733 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 3734 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 3735 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3736 3737 /* Initialize the ring buffer's write pointers */ 3738 ring->wptr = 0; 3739 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3740 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3741 3742 /* set the wb address whether it's enabled or not */ 3743 rptr_addr = ring->rptr_gpu_addr; 3744 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 3745 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3746 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3747 3748 wptr_gpu_addr = ring->wptr_gpu_addr; 3749 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3750 lower_32_bits(wptr_gpu_addr)); 3751 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3752 upper_32_bits(wptr_gpu_addr)); 3753 3754 mdelay(1); 3755 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3756 3757 rb_addr = ring->gpu_addr >> 8; 3758 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 3759 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 3760 3761 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 3762 3763 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3764 mutex_unlock(&adev->srbm_mutex); 3765 3766 /* Init gfx ring 1 for pipe 1 */ 3767 if (adev->gfx.num_gfx_rings > 1) { 3768 mutex_lock(&adev->srbm_mutex); 3769 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1); 3770 /* maximum supported gfx ring is 2 */ 3771 ring = &adev->gfx.gfx_ring[1]; 3772 rb_bufsz = order_base_2(ring->ring_size / 8); 3773 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); 3774 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); 3775 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3776 /* Initialize the ring buffer's write pointers */ 3777 ring->wptr = 0; 3778 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); 3779 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); 3780 /* Set the wb address whether it's enabled or not */ 3781 rptr_addr = ring->rptr_gpu_addr; 3782 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); 3783 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3784 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3785 wptr_gpu_addr = ring->wptr_gpu_addr; 3786 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3787 lower_32_bits(wptr_gpu_addr)); 3788 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3789 upper_32_bits(wptr_gpu_addr)); 3790 3791 mdelay(1); 3792 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3793 3794 rb_addr = ring->gpu_addr >> 8; 3795 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr); 3796 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr)); 3797 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1); 3798 3799 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3800 mutex_unlock(&adev->srbm_mutex); 3801 } 3802 /* Switch to pipe 0 */ 3803 mutex_lock(&adev->srbm_mutex); 3804 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3805 mutex_unlock(&adev->srbm_mutex); 3806 3807 /* start the ring */ 3808 gfx_v11_0_cp_gfx_start(adev); 3809 3810 return 0; 3811 } 3812 3813 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 3814 { 3815 u32 data; 3816 3817 if (adev->gfx.rs64_enable) { 3818 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 3819 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 3820 enable ? 0 : 1); 3821 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 3822 enable ? 0 : 1); 3823 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 3824 enable ? 0 : 1); 3825 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 3826 enable ? 0 : 1); 3827 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 3828 enable ? 0 : 1); 3829 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 3830 enable ? 1 : 0); 3831 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 3832 enable ? 1 : 0); 3833 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 3834 enable ? 1 : 0); 3835 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 3836 enable ? 1 : 0); 3837 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 3838 enable ? 0 : 1); 3839 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 3840 } else { 3841 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); 3842 3843 if (enable) { 3844 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); 3845 if (!adev->enable_mes_kiq) 3846 data = REG_SET_FIELD(data, CP_MEC_CNTL, 3847 MEC_ME2_HALT, 0); 3848 } else { 3849 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); 3850 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); 3851 } 3852 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); 3853 } 3854 3855 udelay(50); 3856 } 3857 3858 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) 3859 { 3860 const struct gfx_firmware_header_v1_0 *mec_hdr; 3861 const __le32 *fw_data; 3862 unsigned i, fw_size; 3863 u32 *fw = NULL; 3864 int r; 3865 3866 if (!adev->gfx.mec_fw) 3867 return -EINVAL; 3868 3869 gfx_v11_0_cp_compute_enable(adev, false); 3870 3871 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3872 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3873 3874 fw_data = (const __le32 *) 3875 (adev->gfx.mec_fw->data + 3876 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 3877 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 3878 3879 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 3880 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3881 &adev->gfx.mec.mec_fw_obj, 3882 &adev->gfx.mec.mec_fw_gpu_addr, 3883 (void **)&fw); 3884 if (r) { 3885 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); 3886 gfx_v11_0_mec_fini(adev); 3887 return r; 3888 } 3889 3890 memcpy(fw, fw_data, fw_size); 3891 3892 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3893 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3894 3895 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); 3896 3897 /* MEC1 */ 3898 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); 3899 3900 for (i = 0; i < mec_hdr->jt_size; i++) 3901 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, 3902 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 3903 3904 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 3905 3906 return 0; 3907 } 3908 3909 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 3910 { 3911 const struct gfx_firmware_header_v2_0 *mec_hdr; 3912 const __le32 *fw_ucode, *fw_data; 3913 u32 tmp, fw_ucode_size, fw_data_size; 3914 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 3915 u32 *fw_ucode_ptr, *fw_data_ptr; 3916 int r; 3917 3918 if (!adev->gfx.mec_fw) 3919 return -EINVAL; 3920 3921 gfx_v11_0_cp_compute_enable(adev, false); 3922 3923 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 3924 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3925 3926 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 3927 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 3928 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 3929 3930 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 3931 le32_to_cpu(mec_hdr->data_offset_bytes)); 3932 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 3933 3934 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3935 64 * 1024, 3936 AMDGPU_GEM_DOMAIN_VRAM | 3937 AMDGPU_GEM_DOMAIN_GTT, 3938 &adev->gfx.mec.mec_fw_obj, 3939 &adev->gfx.mec.mec_fw_gpu_addr, 3940 (void **)&fw_ucode_ptr); 3941 if (r) { 3942 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 3943 gfx_v11_0_mec_fini(adev); 3944 return r; 3945 } 3946 3947 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3948 64 * 1024, 3949 AMDGPU_GEM_DOMAIN_VRAM | 3950 AMDGPU_GEM_DOMAIN_GTT, 3951 &adev->gfx.mec.mec_fw_data_obj, 3952 &adev->gfx.mec.mec_fw_data_gpu_addr, 3953 (void **)&fw_data_ptr); 3954 if (r) { 3955 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 3956 gfx_v11_0_mec_fini(adev); 3957 return r; 3958 } 3959 3960 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); 3961 memcpy(fw_data_ptr, fw_data, fw_data_size); 3962 3963 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3964 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); 3965 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3966 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); 3967 3968 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 3969 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 3970 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 3971 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 3972 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 3973 3974 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 3975 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 3976 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 3977 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 3978 3979 mutex_lock(&adev->srbm_mutex); 3980 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 3981 soc21_grbm_select(adev, 1, i, 0, 0); 3982 3983 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr); 3984 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 3985 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr)); 3986 3987 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 3988 mec_hdr->ucode_start_addr_lo >> 2 | 3989 mec_hdr->ucode_start_addr_hi << 30); 3990 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 3991 mec_hdr->ucode_start_addr_hi >> 2); 3992 3993 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr); 3994 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 3995 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 3996 } 3997 mutex_unlock(&adev->srbm_mutex); 3998 soc21_grbm_select(adev, 0, 0, 0, 0); 3999 4000 /* Trigger an invalidation of the L1 instruction caches */ 4001 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 4002 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 4003 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 4004 4005 /* Wait for invalidation complete */ 4006 for (i = 0; i < usec_timeout; i++) { 4007 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 4008 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 4009 INVALIDATE_DCACHE_COMPLETE)) 4010 break; 4011 udelay(1); 4012 } 4013 4014 if (i >= usec_timeout) { 4015 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 4016 return -EINVAL; 4017 } 4018 4019 /* Trigger an invalidation of the L1 instruction caches */ 4020 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 4021 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 4022 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 4023 4024 /* Wait for invalidation complete */ 4025 for (i = 0; i < usec_timeout; i++) { 4026 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 4027 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 4028 INVALIDATE_CACHE_COMPLETE)) 4029 break; 4030 udelay(1); 4031 } 4032 4033 if (i >= usec_timeout) { 4034 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 4035 return -EINVAL; 4036 } 4037 4038 return 0; 4039 } 4040 4041 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring) 4042 { 4043 uint32_t tmp; 4044 struct amdgpu_device *adev = ring->adev; 4045 4046 /* tell RLC which is KIQ queue */ 4047 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 4048 tmp &= 0xffffff00; 4049 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 4050 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80); 4051 } 4052 4053 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev) 4054 { 4055 /* set graphics engine doorbell range */ 4056 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 4057 (adev->doorbell_index.gfx_ring0 * 2) << 2); 4058 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 4059 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 4060 4061 /* set compute engine doorbell range */ 4062 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4063 (adev->doorbell_index.kiq * 2) << 2); 4064 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4065 (adev->doorbell_index.userqueue_end * 2) << 2); 4066 } 4067 4068 static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev, 4069 struct v11_gfx_mqd *mqd, 4070 struct amdgpu_mqd_prop *prop) 4071 { 4072 bool priority = 0; 4073 u32 tmp; 4074 4075 /* set up default queue priority level 4076 * 0x0 = low priority, 0x1 = high priority 4077 */ 4078 if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH) 4079 priority = 1; 4080 4081 tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT; 4082 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority); 4083 mqd->cp_gfx_hqd_queue_priority = tmp; 4084 } 4085 4086 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 4087 struct amdgpu_mqd_prop *prop) 4088 { 4089 struct v11_gfx_mqd *mqd = m; 4090 uint64_t hqd_gpu_addr, wb_gpu_addr; 4091 uint32_t tmp; 4092 uint32_t rb_bufsz; 4093 4094 /* set up gfx hqd wptr */ 4095 mqd->cp_gfx_hqd_wptr = 0; 4096 mqd->cp_gfx_hqd_wptr_hi = 0; 4097 4098 /* set the pointer to the MQD */ 4099 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 4100 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 4101 4102 /* set up mqd control */ 4103 tmp = regCP_GFX_MQD_CONTROL_DEFAULT; 4104 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 4105 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 4106 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 4107 mqd->cp_gfx_mqd_control = tmp; 4108 4109 /* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ 4110 tmp = regCP_GFX_HQD_VMID_DEFAULT; 4111 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 4112 mqd->cp_gfx_hqd_vmid = 0; 4113 4114 /* set up gfx queue priority */ 4115 gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop); 4116 4117 /* set up time quantum */ 4118 tmp = regCP_GFX_HQD_QUANTUM_DEFAULT; 4119 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 4120 mqd->cp_gfx_hqd_quantum = tmp; 4121 4122 /* set up gfx hqd base. this is similar as CP_RB_BASE */ 4123 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 4124 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 4125 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 4126 4127 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */ 4128 wb_gpu_addr = prop->rptr_gpu_addr; 4129 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 4130 mqd->cp_gfx_hqd_rptr_addr_hi = 4131 upper_32_bits(wb_gpu_addr) & 0xffff; 4132 4133 /* set up rb_wptr_poll addr */ 4134 wb_gpu_addr = prop->wptr_gpu_addr; 4135 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4136 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4137 4138 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ 4139 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 4140 tmp = regCP_GFX_HQD_CNTL_DEFAULT; 4141 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 4142 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 4143 #ifdef __BIG_ENDIAN 4144 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 4145 #endif 4146 if (prop->tmz_queue) 4147 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); 4148 if (!prop->kernel_queue) 4149 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1); 4150 mqd->cp_gfx_hqd_cntl = tmp; 4151 4152 /* set up cp_doorbell_control */ 4153 tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT; 4154 if (prop->use_doorbell) { 4155 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 4156 DOORBELL_OFFSET, prop->doorbell_index); 4157 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 4158 DOORBELL_EN, 1); 4159 } else 4160 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 4161 DOORBELL_EN, 0); 4162 mqd->cp_rb_doorbell_control = tmp; 4163 4164 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4165 mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT; 4166 4167 /* active the queue */ 4168 mqd->cp_gfx_hqd_active = 1; 4169 4170 /* set gfx UQ items */ 4171 mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr); 4172 mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr); 4173 mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr); 4174 mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr); 4175 mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr); 4176 mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr); 4177 mqd->fence_address_lo = lower_32_bits(prop->fence_address); 4178 mqd->fence_address_hi = upper_32_bits(prop->fence_address); 4179 4180 return 0; 4181 } 4182 4183 static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) 4184 { 4185 struct amdgpu_device *adev = ring->adev; 4186 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 4187 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 4188 4189 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 4190 memset((void *)mqd, 0, sizeof(*mqd)); 4191 mutex_lock(&adev->srbm_mutex); 4192 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4193 amdgpu_ring_init_mqd(ring); 4194 soc21_grbm_select(adev, 0, 0, 0, 0); 4195 mutex_unlock(&adev->srbm_mutex); 4196 if (adev->gfx.me.mqd_backup[mqd_idx]) 4197 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4198 } else { 4199 /* restore mqd with the backup copy */ 4200 if (adev->gfx.me.mqd_backup[mqd_idx]) 4201 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 4202 /* reset the ring */ 4203 ring->wptr = 0; 4204 *ring->wptr_cpu_addr = 0; 4205 amdgpu_ring_clear_ring(ring); 4206 } 4207 4208 return 0; 4209 } 4210 4211 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 4212 { 4213 int r, i; 4214 4215 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4216 r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 4217 if (r) 4218 return r; 4219 } 4220 4221 r = amdgpu_gfx_enable_kgq(adev, 0); 4222 if (r) 4223 return r; 4224 4225 return gfx_v11_0_cp_gfx_start(adev); 4226 } 4227 4228 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 4229 struct amdgpu_mqd_prop *prop) 4230 { 4231 struct v11_compute_mqd *mqd = m; 4232 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 4233 uint32_t tmp; 4234 4235 mqd->header = 0xC0310800; 4236 mqd->compute_pipelinestat_enable = 0x00000001; 4237 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 4238 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 4239 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 4240 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 4241 mqd->compute_misc_reserved = 0x00000007; 4242 4243 eop_base_addr = prop->eop_gpu_addr >> 8; 4244 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 4245 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 4246 4247 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4248 tmp = regCP_HQD_EOP_CONTROL_DEFAULT; 4249 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 4250 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1)); 4251 4252 mqd->cp_hqd_eop_control = tmp; 4253 4254 /* enable doorbell? */ 4255 tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT; 4256 4257 if (prop->use_doorbell) { 4258 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4259 DOORBELL_OFFSET, prop->doorbell_index); 4260 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4261 DOORBELL_EN, 1); 4262 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4263 DOORBELL_SOURCE, 0); 4264 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4265 DOORBELL_HIT, 0); 4266 } else { 4267 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4268 DOORBELL_EN, 0); 4269 } 4270 4271 mqd->cp_hqd_pq_doorbell_control = tmp; 4272 4273 /* disable the queue if it's active */ 4274 mqd->cp_hqd_dequeue_request = 0; 4275 mqd->cp_hqd_pq_rptr = 0; 4276 mqd->cp_hqd_pq_wptr_lo = 0; 4277 mqd->cp_hqd_pq_wptr_hi = 0; 4278 4279 /* set the pointer to the MQD */ 4280 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 4281 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 4282 4283 /* set MQD vmid to 0 */ 4284 tmp = regCP_MQD_CONTROL_DEFAULT; 4285 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 4286 mqd->cp_mqd_control = tmp; 4287 4288 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4289 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 4290 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 4291 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 4292 4293 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4294 tmp = regCP_HQD_PQ_CONTROL_DEFAULT; 4295 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 4296 (order_base_2(prop->queue_size / 4) - 1)); 4297 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 4298 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 4299 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); 4300 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 4301 prop->allow_tunneling); 4302 if (prop->kernel_queue) { 4303 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 4304 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 4305 } 4306 if (prop->tmz_queue) 4307 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); 4308 mqd->cp_hqd_pq_control = tmp; 4309 4310 /* set the wb address whether it's enabled or not */ 4311 wb_gpu_addr = prop->rptr_gpu_addr; 4312 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 4313 mqd->cp_hqd_pq_rptr_report_addr_hi = 4314 upper_32_bits(wb_gpu_addr) & 0xffff; 4315 4316 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4317 wb_gpu_addr = prop->wptr_gpu_addr; 4318 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4319 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4320 4321 tmp = 0; 4322 /* enable the doorbell if requested */ 4323 if (prop->use_doorbell) { 4324 tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT; 4325 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4326 DOORBELL_OFFSET, prop->doorbell_index); 4327 4328 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4329 DOORBELL_EN, 1); 4330 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4331 DOORBELL_SOURCE, 0); 4332 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4333 DOORBELL_HIT, 0); 4334 } 4335 4336 mqd->cp_hqd_pq_doorbell_control = tmp; 4337 4338 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4339 mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT; 4340 4341 /* set the vmid for the queue */ 4342 mqd->cp_hqd_vmid = 0; 4343 4344 tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT; 4345 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 4346 mqd->cp_hqd_persistent_state = tmp; 4347 4348 /* set MIN_IB_AVAIL_SIZE */ 4349 tmp = regCP_HQD_IB_CONTROL_DEFAULT; 4350 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 4351 mqd->cp_hqd_ib_control = tmp; 4352 4353 /* set static priority for a compute queue/ring */ 4354 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 4355 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 4356 4357 mqd->cp_hqd_active = prop->hqd_active; 4358 4359 /* set UQ fenceaddress */ 4360 mqd->fence_address_lo = lower_32_bits(prop->fence_address); 4361 mqd->fence_address_hi = upper_32_bits(prop->fence_address); 4362 4363 return 0; 4364 } 4365 4366 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring) 4367 { 4368 struct amdgpu_device *adev = ring->adev; 4369 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4370 int j; 4371 4372 /* inactivate the queue */ 4373 if (amdgpu_sriov_vf(adev)) 4374 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 4375 4376 /* disable wptr polling */ 4377 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 4378 4379 /* write the EOP addr */ 4380 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 4381 mqd->cp_hqd_eop_base_addr_lo); 4382 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 4383 mqd->cp_hqd_eop_base_addr_hi); 4384 4385 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4386 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 4387 mqd->cp_hqd_eop_control); 4388 4389 /* enable doorbell? */ 4390 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4391 mqd->cp_hqd_pq_doorbell_control); 4392 4393 /* disable the queue if it's active */ 4394 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 4395 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 4396 for (j = 0; j < adev->usec_timeout; j++) { 4397 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 4398 break; 4399 udelay(1); 4400 } 4401 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 4402 mqd->cp_hqd_dequeue_request); 4403 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 4404 mqd->cp_hqd_pq_rptr); 4405 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4406 mqd->cp_hqd_pq_wptr_lo); 4407 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4408 mqd->cp_hqd_pq_wptr_hi); 4409 } 4410 4411 /* set the pointer to the MQD */ 4412 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 4413 mqd->cp_mqd_base_addr_lo); 4414 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 4415 mqd->cp_mqd_base_addr_hi); 4416 4417 /* set MQD vmid to 0 */ 4418 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 4419 mqd->cp_mqd_control); 4420 4421 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4422 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 4423 mqd->cp_hqd_pq_base_lo); 4424 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 4425 mqd->cp_hqd_pq_base_hi); 4426 4427 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4428 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 4429 mqd->cp_hqd_pq_control); 4430 4431 /* set the wb address whether it's enabled or not */ 4432 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 4433 mqd->cp_hqd_pq_rptr_report_addr_lo); 4434 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 4435 mqd->cp_hqd_pq_rptr_report_addr_hi); 4436 4437 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4438 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 4439 mqd->cp_hqd_pq_wptr_poll_addr_lo); 4440 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 4441 mqd->cp_hqd_pq_wptr_poll_addr_hi); 4442 4443 /* enable the doorbell if requested */ 4444 if (ring->use_doorbell) { 4445 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4446 (adev->doorbell_index.kiq * 2) << 2); 4447 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4448 (adev->doorbell_index.userqueue_end * 2) << 2); 4449 } 4450 4451 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4452 mqd->cp_hqd_pq_doorbell_control); 4453 4454 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4455 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4456 mqd->cp_hqd_pq_wptr_lo); 4457 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4458 mqd->cp_hqd_pq_wptr_hi); 4459 4460 /* set the vmid for the queue */ 4461 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 4462 4463 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 4464 mqd->cp_hqd_persistent_state); 4465 4466 /* activate the queue */ 4467 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 4468 mqd->cp_hqd_active); 4469 4470 if (ring->use_doorbell) 4471 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 4472 4473 return 0; 4474 } 4475 4476 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) 4477 { 4478 struct amdgpu_device *adev = ring->adev; 4479 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4480 4481 gfx_v11_0_kiq_setting(ring); 4482 4483 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4484 /* reset MQD to a clean status */ 4485 if (adev->gfx.kiq[0].mqd_backup) 4486 memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 4487 4488 /* reset ring buffer */ 4489 ring->wptr = 0; 4490 amdgpu_ring_clear_ring(ring); 4491 4492 mutex_lock(&adev->srbm_mutex); 4493 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4494 gfx_v11_0_kiq_init_register(ring); 4495 soc21_grbm_select(adev, 0, 0, 0, 0); 4496 mutex_unlock(&adev->srbm_mutex); 4497 } else { 4498 memset((void *)mqd, 0, sizeof(*mqd)); 4499 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 4500 amdgpu_ring_clear_ring(ring); 4501 mutex_lock(&adev->srbm_mutex); 4502 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4503 amdgpu_ring_init_mqd(ring); 4504 gfx_v11_0_kiq_init_register(ring); 4505 soc21_grbm_select(adev, 0, 0, 0, 0); 4506 mutex_unlock(&adev->srbm_mutex); 4507 4508 if (adev->gfx.kiq[0].mqd_backup) 4509 memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 4510 } 4511 4512 return 0; 4513 } 4514 4515 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) 4516 { 4517 struct amdgpu_device *adev = ring->adev; 4518 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4519 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 4520 4521 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 4522 memset((void *)mqd, 0, sizeof(*mqd)); 4523 mutex_lock(&adev->srbm_mutex); 4524 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4525 amdgpu_ring_init_mqd(ring); 4526 soc21_grbm_select(adev, 0, 0, 0, 0); 4527 mutex_unlock(&adev->srbm_mutex); 4528 4529 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4530 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4531 } else { 4532 /* restore MQD to a clean status */ 4533 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4534 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4535 /* reset ring buffer */ 4536 ring->wptr = 0; 4537 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 4538 amdgpu_ring_clear_ring(ring); 4539 } 4540 4541 return 0; 4542 } 4543 4544 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4545 { 4546 gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 4547 return 0; 4548 } 4549 4550 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4551 { 4552 int i, r; 4553 4554 if (!amdgpu_async_gfx_ring) 4555 gfx_v11_0_cp_compute_enable(adev, true); 4556 4557 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4558 r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 4559 if (r) 4560 return r; 4561 } 4562 4563 return amdgpu_gfx_enable_kcq(adev, 0); 4564 } 4565 4566 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) 4567 { 4568 int r, i; 4569 struct amdgpu_ring *ring; 4570 4571 if (!(adev->flags & AMD_IS_APU)) 4572 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4573 4574 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4575 /* legacy firmware loading */ 4576 r = gfx_v11_0_cp_gfx_load_microcode(adev); 4577 if (r) 4578 return r; 4579 4580 if (adev->gfx.rs64_enable) 4581 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev); 4582 else 4583 r = gfx_v11_0_cp_compute_load_microcode(adev); 4584 if (r) 4585 return r; 4586 } 4587 4588 gfx_v11_0_cp_set_doorbell_range(adev); 4589 4590 if (amdgpu_async_gfx_ring) { 4591 gfx_v11_0_cp_compute_enable(adev, true); 4592 gfx_v11_0_cp_gfx_enable(adev, true); 4593 } 4594 4595 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 4596 r = amdgpu_mes_kiq_hw_init(adev); 4597 else 4598 r = gfx_v11_0_kiq_resume(adev); 4599 if (r) 4600 return r; 4601 4602 r = gfx_v11_0_kcq_resume(adev); 4603 if (r) 4604 return r; 4605 4606 if (!amdgpu_async_gfx_ring) { 4607 r = gfx_v11_0_cp_gfx_resume(adev); 4608 if (r) 4609 return r; 4610 } else { 4611 r = gfx_v11_0_cp_async_gfx_ring_resume(adev); 4612 if (r) 4613 return r; 4614 } 4615 4616 if (adev->gfx.disable_kq) { 4617 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4618 ring = &adev->gfx.gfx_ring[i]; 4619 /* we don't want to set ring->ready */ 4620 r = amdgpu_ring_test_ring(ring); 4621 if (r) 4622 return r; 4623 } 4624 if (amdgpu_async_gfx_ring) 4625 amdgpu_gfx_disable_kgq(adev, 0); 4626 } else { 4627 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4628 ring = &adev->gfx.gfx_ring[i]; 4629 r = amdgpu_ring_test_helper(ring); 4630 if (r) 4631 return r; 4632 } 4633 } 4634 4635 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4636 ring = &adev->gfx.compute_ring[i]; 4637 r = amdgpu_ring_test_helper(ring); 4638 if (r) 4639 return r; 4640 } 4641 4642 return 0; 4643 } 4644 4645 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable) 4646 { 4647 gfx_v11_0_cp_gfx_enable(adev, enable); 4648 gfx_v11_0_cp_compute_enable(adev, enable); 4649 } 4650 4651 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) 4652 { 4653 int r; 4654 bool value; 4655 4656 r = adev->gfxhub.funcs->gart_enable(adev); 4657 if (r) 4658 return r; 4659 4660 amdgpu_device_flush_hdp(adev, NULL); 4661 4662 value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS; 4663 4664 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 4665 /* TODO investigate why this and the hdp flush above is needed, 4666 * are we missing a flush somewhere else? */ 4667 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); 4668 4669 return 0; 4670 } 4671 4672 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) 4673 { 4674 u32 tmp; 4675 4676 /* select RS64 */ 4677 if (adev->gfx.rs64_enable) { 4678 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); 4679 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); 4680 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); 4681 4682 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); 4683 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); 4684 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); 4685 } 4686 4687 if (amdgpu_emu_mode == 1) 4688 msleep(100); 4689 } 4690 4691 static int get_gb_addr_config(struct amdgpu_device * adev) 4692 { 4693 u32 gb_addr_config; 4694 4695 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 4696 if (gb_addr_config == 0) 4697 return -EINVAL; 4698 4699 adev->gfx.config.gb_addr_config_fields.num_pkrs = 4700 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 4701 4702 adev->gfx.config.gb_addr_config = gb_addr_config; 4703 4704 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 4705 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4706 GB_ADDR_CONFIG, NUM_PIPES); 4707 4708 adev->gfx.config.max_tile_pipes = 4709 adev->gfx.config.gb_addr_config_fields.num_pipes; 4710 4711 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 4712 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4713 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 4714 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 4715 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4716 GB_ADDR_CONFIG, NUM_RB_PER_SE); 4717 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 4718 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4719 GB_ADDR_CONFIG, NUM_SHADER_ENGINES); 4720 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 4721 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4722 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 4723 4724 return 0; 4725 } 4726 4727 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) 4728 { 4729 uint32_t data; 4730 4731 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 4732 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 4733 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 4734 4735 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 4736 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 4737 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 4738 } 4739 4740 static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block) 4741 { 4742 int r; 4743 struct amdgpu_device *adev = ip_block->adev; 4744 4745 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, 4746 adev->gfx.cleaner_shader_ptr); 4747 4748 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 4749 if (adev->gfx.imu.funcs) { 4750 /* RLC autoload sequence 1: Program rlc ram */ 4751 if (adev->gfx.imu.funcs->program_rlc_ram) 4752 adev->gfx.imu.funcs->program_rlc_ram(adev); 4753 /* rlc autoload firmware */ 4754 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); 4755 if (r) 4756 return r; 4757 } 4758 } else { 4759 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4760 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 4761 if (adev->gfx.imu.funcs->load_microcode) 4762 adev->gfx.imu.funcs->load_microcode(adev); 4763 if (adev->gfx.imu.funcs->setup_imu) 4764 adev->gfx.imu.funcs->setup_imu(adev); 4765 if (adev->gfx.imu.funcs->start_imu) 4766 adev->gfx.imu.funcs->start_imu(adev); 4767 } 4768 4769 /* disable gpa mode in backdoor loading */ 4770 gfx_v11_0_disable_gpa_mode(adev); 4771 } 4772 } 4773 4774 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 4775 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 4776 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev); 4777 if (r) { 4778 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r); 4779 return r; 4780 } 4781 } 4782 4783 adev->gfx.is_poweron = true; 4784 4785 if(get_gb_addr_config(adev)) 4786 DRM_WARN("Invalid gb_addr_config !\n"); 4787 4788 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && 4789 adev->gfx.rs64_enable) 4790 gfx_v11_0_config_gfx_rs64(adev); 4791 4792 r = gfx_v11_0_gfxhub_enable(adev); 4793 if (r) 4794 return r; 4795 4796 if (!amdgpu_emu_mode) 4797 gfx_v11_0_init_golden_registers(adev); 4798 4799 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || 4800 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 4801 /** 4802 * For gfx 11, rlc firmware loading relies on smu firmware is 4803 * loaded firstly, so in direct type, it has to load smc ucode 4804 * here before rlc. 4805 */ 4806 r = amdgpu_pm_load_smu_firmware(adev, NULL); 4807 if (r) 4808 return r; 4809 } 4810 4811 gfx_v11_0_constants_init(adev); 4812 4813 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 4814 gfx_v11_0_select_cp_fw_arch(adev); 4815 4816 if (adev->nbio.funcs->gc_doorbell_init) 4817 adev->nbio.funcs->gc_doorbell_init(adev); 4818 4819 r = gfx_v11_0_rlc_resume(adev); 4820 if (r) 4821 return r; 4822 4823 /* 4824 * init golden registers and rlc resume may override some registers, 4825 * reconfig them here 4826 */ 4827 gfx_v11_0_tcp_harvest(adev); 4828 4829 r = gfx_v11_0_cp_resume(adev); 4830 if (r) 4831 return r; 4832 4833 /* get IMU version from HW if it's not set */ 4834 if (!adev->gfx.imu_fw_version) 4835 adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0); 4836 4837 return r; 4838 } 4839 4840 static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev, 4841 bool enable) 4842 { 4843 unsigned int irq_type; 4844 int m, p, r; 4845 4846 if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) { 4847 for (m = 0; m < adev->gfx.me.num_me; m++) { 4848 for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) { 4849 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p; 4850 if (enable) 4851 r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, 4852 irq_type); 4853 else 4854 r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, 4855 irq_type); 4856 if (r) 4857 return r; 4858 } 4859 } 4860 } 4861 4862 if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) { 4863 for (m = 0; m < adev->gfx.mec.num_mec; ++m) { 4864 for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) { 4865 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 4866 + (m * adev->gfx.mec.num_pipe_per_mec) 4867 + p; 4868 if (enable) 4869 r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, 4870 irq_type); 4871 else 4872 r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, 4873 irq_type); 4874 if (r) 4875 return r; 4876 } 4877 } 4878 } 4879 4880 return 0; 4881 } 4882 4883 static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) 4884 { 4885 struct amdgpu_device *adev = ip_block->adev; 4886 4887 cancel_delayed_work_sync(&adev->gfx.idle_work); 4888 4889 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 4890 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 4891 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); 4892 gfx_v11_0_set_userq_eop_interrupts(adev, false); 4893 4894 if (!adev->no_hw_access) { 4895 if (amdgpu_async_gfx_ring && 4896 !adev->gfx.disable_kq) { 4897 if (amdgpu_gfx_disable_kgq(adev, 0)) 4898 DRM_ERROR("KGQ disable failed\n"); 4899 } 4900 4901 if (amdgpu_gfx_disable_kcq(adev, 0)) 4902 DRM_ERROR("KCQ disable failed\n"); 4903 4904 amdgpu_mes_kiq_hw_fini(adev); 4905 } 4906 4907 if (amdgpu_sriov_vf(adev)) 4908 /* Remove the steps disabling CPG and clearing KIQ position, 4909 * so that CP could perform IDLE-SAVE during switch. Those 4910 * steps are necessary to avoid a DMAR error in gfx9 but it is 4911 * not reproduced on gfx11. 4912 */ 4913 return 0; 4914 4915 gfx_v11_0_cp_enable(adev, false); 4916 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4917 4918 adev->gfxhub.funcs->gart_disable(adev); 4919 4920 adev->gfx.is_poweron = false; 4921 4922 return 0; 4923 } 4924 4925 static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block) 4926 { 4927 return gfx_v11_0_hw_fini(ip_block); 4928 } 4929 4930 static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block) 4931 { 4932 return gfx_v11_0_hw_init(ip_block); 4933 } 4934 4935 static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block) 4936 { 4937 struct amdgpu_device *adev = ip_block->adev; 4938 4939 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 4940 GRBM_STATUS, GUI_ACTIVE)) 4941 return false; 4942 else 4943 return true; 4944 } 4945 4946 static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) 4947 { 4948 unsigned i; 4949 u32 tmp; 4950 struct amdgpu_device *adev = ip_block->adev; 4951 4952 for (i = 0; i < adev->usec_timeout; i++) { 4953 /* read MC_STATUS */ 4954 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 4955 GRBM_STATUS__GUI_ACTIVE_MASK; 4956 4957 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 4958 return 0; 4959 udelay(1); 4960 } 4961 return -ETIMEDOUT; 4962 } 4963 4964 int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, 4965 bool req) 4966 { 4967 u32 i, tmp, val; 4968 4969 for (i = 0; i < adev->usec_timeout; i++) { 4970 /* Request with MeId=2, PipeId=0 */ 4971 tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req); 4972 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4); 4973 WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp); 4974 4975 val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX); 4976 if (req) { 4977 if (val == tmp) 4978 break; 4979 } else { 4980 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, 4981 REQUEST, 1); 4982 4983 /* unlocked or locked by firmware */ 4984 if (val != tmp) 4985 break; 4986 } 4987 udelay(1); 4988 } 4989 4990 if (i >= adev->usec_timeout) 4991 return -EINVAL; 4992 4993 return 0; 4994 } 4995 4996 static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) 4997 { 4998 u32 grbm_soft_reset = 0; 4999 u32 tmp; 5000 int r, i, j, k; 5001 struct amdgpu_device *adev = ip_block->adev; 5002 5003 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 5004 5005 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5006 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0); 5007 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0); 5008 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0); 5009 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0); 5010 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 5011 5012 mutex_lock(&adev->srbm_mutex); 5013 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 5014 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 5015 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 5016 soc21_grbm_select(adev, i, k, j, 0); 5017 5018 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 5019 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); 5020 } 5021 } 5022 } 5023 for (i = 0; i < adev->gfx.me.num_me; ++i) { 5024 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 5025 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 5026 soc21_grbm_select(adev, i, k, j, 0); 5027 5028 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); 5029 } 5030 } 5031 } 5032 soc21_grbm_select(adev, 0, 0, 0, 0); 5033 mutex_unlock(&adev->srbm_mutex); 5034 5035 /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ 5036 mutex_lock(&adev->gfx.reset_sem_mutex); 5037 r = gfx_v11_0_request_gfx_index_mutex(adev, true); 5038 if (r) { 5039 mutex_unlock(&adev->gfx.reset_sem_mutex); 5040 DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n"); 5041 return r; 5042 } 5043 5044 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe); 5045 5046 // Read CP_VMID_RESET register three times. 5047 // to get sufficient time for GFX_HQD_ACTIVE reach 0 5048 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 5049 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 5050 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 5051 5052 /* release the gfx mutex */ 5053 r = gfx_v11_0_request_gfx_index_mutex(adev, false); 5054 mutex_unlock(&adev->gfx.reset_sem_mutex); 5055 if (r) { 5056 DRM_ERROR("Failed to release the gfx mutex during soft reset\n"); 5057 return r; 5058 } 5059 5060 for (i = 0; i < adev->usec_timeout; i++) { 5061 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) && 5062 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE)) 5063 break; 5064 udelay(1); 5065 } 5066 if (i >= adev->usec_timeout) { 5067 printk("Failed to wait all pipes clean\n"); 5068 return -EINVAL; 5069 } 5070 5071 /********** trigger soft reset ***********/ 5072 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 5073 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5074 SOFT_RESET_CP, 1); 5075 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5076 SOFT_RESET_GFX, 1); 5077 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5078 SOFT_RESET_CPF, 1); 5079 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5080 SOFT_RESET_CPC, 1); 5081 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5082 SOFT_RESET_CPG, 1); 5083 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 5084 /********** exit soft reset ***********/ 5085 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 5086 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5087 SOFT_RESET_CP, 0); 5088 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5089 SOFT_RESET_GFX, 0); 5090 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5091 SOFT_RESET_CPF, 0); 5092 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5093 SOFT_RESET_CPC, 0); 5094 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5095 SOFT_RESET_CPG, 0); 5096 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 5097 5098 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL); 5099 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1); 5100 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp); 5101 5102 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0); 5103 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0); 5104 5105 for (i = 0; i < adev->usec_timeout; i++) { 5106 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET)) 5107 break; 5108 udelay(1); 5109 } 5110 if (i >= adev->usec_timeout) { 5111 printk("Failed to wait CP_VMID_RESET to 0\n"); 5112 return -EINVAL; 5113 } 5114 5115 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5116 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 5117 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 5118 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 5119 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 5120 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 5121 5122 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5123 5124 return gfx_v11_0_cp_resume(adev); 5125 } 5126 5127 static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block) 5128 { 5129 int i, r; 5130 struct amdgpu_device *adev = ip_block->adev; 5131 struct amdgpu_ring *ring; 5132 long tmo = msecs_to_jiffies(1000); 5133 5134 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 5135 ring = &adev->gfx.gfx_ring[i]; 5136 r = amdgpu_ring_test_ib(ring, tmo); 5137 if (r) 5138 return true; 5139 } 5140 5141 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 5142 ring = &adev->gfx.compute_ring[i]; 5143 r = amdgpu_ring_test_ib(ring, tmo); 5144 if (r) 5145 return true; 5146 } 5147 5148 return false; 5149 } 5150 5151 static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block) 5152 { 5153 struct amdgpu_device *adev = ip_block->adev; 5154 /** 5155 * GFX soft reset will impact MES, need resume MES when do GFX soft reset 5156 */ 5157 return amdgpu_mes_resume(adev); 5158 } 5159 5160 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) 5161 { 5162 uint64_t clock; 5163 uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; 5164 5165 if (amdgpu_sriov_vf(adev)) { 5166 amdgpu_gfx_off_ctrl(adev, false); 5167 mutex_lock(&adev->gfx.gpu_clock_mutex); 5168 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); 5169 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); 5170 clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); 5171 if (clock_counter_hi_pre != clock_counter_hi_after) 5172 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); 5173 mutex_unlock(&adev->gfx.gpu_clock_mutex); 5174 amdgpu_gfx_off_ctrl(adev, true); 5175 } else { 5176 preempt_disable(); 5177 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); 5178 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); 5179 clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); 5180 if (clock_counter_hi_pre != clock_counter_hi_after) 5181 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); 5182 preempt_enable(); 5183 } 5184 clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); 5185 5186 return clock; 5187 } 5188 5189 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 5190 uint32_t vmid, 5191 uint32_t gds_base, uint32_t gds_size, 5192 uint32_t gws_base, uint32_t gws_size, 5193 uint32_t oa_base, uint32_t oa_size) 5194 { 5195 struct amdgpu_device *adev = ring->adev; 5196 5197 /* GDS Base */ 5198 gfx_v11_0_write_data_to_reg(ring, 0, false, 5199 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, 5200 gds_base); 5201 5202 /* GDS Size */ 5203 gfx_v11_0_write_data_to_reg(ring, 0, false, 5204 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, 5205 gds_size); 5206 5207 /* GWS */ 5208 gfx_v11_0_write_data_to_reg(ring, 0, false, 5209 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, 5210 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 5211 5212 /* OA */ 5213 gfx_v11_0_write_data_to_reg(ring, 0, false, 5214 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, 5215 (1 << (oa_size + oa_base)) - (1 << oa_base)); 5216 } 5217 5218 static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block) 5219 { 5220 struct amdgpu_device *adev = ip_block->adev; 5221 5222 switch (amdgpu_user_queue) { 5223 case -1: 5224 case 0: 5225 default: 5226 adev->gfx.disable_kq = false; 5227 adev->gfx.disable_uq = true; 5228 break; 5229 case 1: 5230 adev->gfx.disable_kq = false; 5231 adev->gfx.disable_uq = false; 5232 break; 5233 case 2: 5234 adev->gfx.disable_kq = true; 5235 adev->gfx.disable_uq = false; 5236 break; 5237 } 5238 5239 adev->gfx.funcs = &gfx_v11_0_gfx_funcs; 5240 5241 if (adev->gfx.disable_kq) { 5242 /* We need one GFX ring temporarily to set up 5243 * the clear state. 5244 */ 5245 adev->gfx.num_gfx_rings = 1; 5246 adev->gfx.num_compute_rings = 0; 5247 } else { 5248 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; 5249 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 5250 AMDGPU_MAX_COMPUTE_RINGS); 5251 } 5252 5253 gfx_v11_0_set_kiq_pm4_funcs(adev); 5254 gfx_v11_0_set_ring_funcs(adev); 5255 gfx_v11_0_set_irq_funcs(adev); 5256 gfx_v11_0_set_gds_init(adev); 5257 gfx_v11_0_set_rlc_funcs(adev); 5258 gfx_v11_0_set_mqd_funcs(adev); 5259 gfx_v11_0_set_imu_funcs(adev); 5260 5261 gfx_v11_0_init_rlcg_reg_access_ctrl(adev); 5262 5263 return gfx_v11_0_init_microcode(adev); 5264 } 5265 5266 static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) 5267 { 5268 struct amdgpu_device *adev = ip_block->adev; 5269 int r; 5270 5271 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 5272 if (r) 5273 return r; 5274 5275 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 5276 if (r) 5277 return r; 5278 5279 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 5280 if (r) 5281 return r; 5282 5283 r = gfx_v11_0_set_userq_eop_interrupts(adev, true); 5284 if (r) 5285 return r; 5286 5287 return 0; 5288 } 5289 5290 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 5291 { 5292 uint32_t rlc_cntl; 5293 5294 /* if RLC is not enabled, do nothing */ 5295 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 5296 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 5297 } 5298 5299 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 5300 { 5301 uint32_t data; 5302 unsigned i; 5303 5304 data = RLC_SAFE_MODE__CMD_MASK; 5305 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 5306 5307 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 5308 5309 /* wait for RLC_SAFE_MODE */ 5310 for (i = 0; i < adev->usec_timeout; i++) { 5311 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 5312 RLC_SAFE_MODE, CMD)) 5313 break; 5314 udelay(1); 5315 } 5316 } 5317 5318 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) 5319 { 5320 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 5321 } 5322 5323 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 5324 bool enable) 5325 { 5326 uint32_t def, data; 5327 5328 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 5329 return; 5330 5331 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5332 5333 if (enable) 5334 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5335 else 5336 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5337 5338 if (def != data) 5339 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5340 } 5341 5342 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 5343 bool enable) 5344 { 5345 uint32_t def, data; 5346 5347 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 5348 return; 5349 5350 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5351 5352 if (enable) 5353 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5354 else 5355 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5356 5357 if (def != data) 5358 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5359 } 5360 5361 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 5362 bool enable) 5363 { 5364 uint32_t def, data; 5365 5366 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 5367 return; 5368 5369 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5370 5371 if (enable) 5372 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5373 else 5374 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5375 5376 if (def != data) 5377 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5378 } 5379 5380 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 5381 bool enable) 5382 { 5383 uint32_t data, def; 5384 5385 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 5386 return; 5387 5388 /* It is disabled by HW by default */ 5389 if (enable) { 5390 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5391 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 5392 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5393 5394 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5395 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5396 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5397 5398 if (def != data) 5399 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5400 } 5401 } else { 5402 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5403 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5404 5405 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5406 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5407 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5408 5409 if (def != data) 5410 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5411 } 5412 } 5413 } 5414 5415 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 5416 bool enable) 5417 { 5418 uint32_t def, data; 5419 5420 if (!(adev->cg_flags & 5421 (AMD_CG_SUPPORT_GFX_CGCG | 5422 AMD_CG_SUPPORT_GFX_CGLS | 5423 AMD_CG_SUPPORT_GFX_3D_CGCG | 5424 AMD_CG_SUPPORT_GFX_3D_CGLS))) 5425 return; 5426 5427 if (enable) { 5428 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5429 5430 /* unset CGCG override */ 5431 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5432 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 5433 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5434 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 5435 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 5436 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5437 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 5438 5439 /* update CGCG override bits */ 5440 if (def != data) 5441 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5442 5443 /* enable cgcg FSM(0x0000363F) */ 5444 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5445 5446 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 5447 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 5448 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5449 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5450 } 5451 5452 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 5453 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 5454 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5455 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5456 } 5457 5458 if (def != data) 5459 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5460 5461 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5462 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5463 5464 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 5465 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 5466 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5467 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5468 } 5469 5470 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 5471 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 5472 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5473 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5474 } 5475 5476 if (def != data) 5477 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5478 5479 /* set IDLE_POLL_COUNT(0x00900100) */ 5480 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 5481 5482 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 5483 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 5484 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 5485 5486 if (def != data) 5487 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 5488 5489 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5490 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 5491 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 5492 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 5493 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 5494 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 5495 5496 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5497 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5498 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5499 5500 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 5501 if (adev->sdma.num_instances > 1) { 5502 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5503 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5504 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5505 } 5506 } else { 5507 /* Program RLC_CGCG_CGLS_CTRL */ 5508 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5509 5510 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5511 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5512 5513 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5514 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5515 5516 if (def != data) 5517 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5518 5519 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5520 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5521 5522 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 5523 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5524 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5525 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5526 5527 if (def != data) 5528 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5529 5530 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5531 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5532 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5533 5534 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 5535 if (adev->sdma.num_instances > 1) { 5536 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5537 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5538 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5539 } 5540 } 5541 } 5542 5543 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, 5544 bool enable) 5545 { 5546 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 5547 5548 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); 5549 5550 gfx_v11_0_update_medium_grain_clock_gating(adev, enable); 5551 5552 gfx_v11_0_update_repeater_fgcg(adev, enable); 5553 5554 gfx_v11_0_update_sram_fgcg(adev, enable); 5555 5556 gfx_v11_0_update_perf_clk(adev, enable); 5557 5558 if (adev->cg_flags & 5559 (AMD_CG_SUPPORT_GFX_MGCG | 5560 AMD_CG_SUPPORT_GFX_CGLS | 5561 AMD_CG_SUPPORT_GFX_CGCG | 5562 AMD_CG_SUPPORT_GFX_3D_CGCG | 5563 AMD_CG_SUPPORT_GFX_3D_CGLS)) 5564 gfx_v11_0_enable_gui_idle_interrupt(adev, enable); 5565 5566 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5567 5568 return 0; 5569 } 5570 5571 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) 5572 { 5573 u32 reg, pre_data, data; 5574 5575 amdgpu_gfx_off_ctrl(adev, false); 5576 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 5577 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) 5578 pre_data = RREG32_NO_KIQ(reg); 5579 else 5580 pre_data = RREG32(reg); 5581 5582 data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK); 5583 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 5584 5585 if (pre_data != data) { 5586 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) { 5587 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 5588 } else 5589 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 5590 } 5591 amdgpu_gfx_off_ctrl(adev, true); 5592 5593 if (ring 5594 && amdgpu_sriov_is_pp_one_vf(adev) 5595 && (pre_data != data) 5596 && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) 5597 || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { 5598 amdgpu_ring_emit_wreg(ring, reg, data); 5599 } 5600 } 5601 5602 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { 5603 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled, 5604 .set_safe_mode = gfx_v11_0_set_safe_mode, 5605 .unset_safe_mode = gfx_v11_0_unset_safe_mode, 5606 .init = gfx_v11_0_rlc_init, 5607 .get_csb_size = gfx_v11_0_get_csb_size, 5608 .get_csb_buffer = gfx_v11_0_get_csb_buffer, 5609 .resume = gfx_v11_0_rlc_resume, 5610 .stop = gfx_v11_0_rlc_stop, 5611 .reset = gfx_v11_0_rlc_reset, 5612 .start = gfx_v11_0_rlc_start, 5613 .update_spm_vmid = gfx_v11_0_update_spm_vmid, 5614 }; 5615 5616 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) 5617 { 5618 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 5619 5620 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) 5621 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5622 else 5623 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5624 5625 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data); 5626 5627 // Program RLC_PG_DELAY3 for CGPG hysteresis 5628 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 5629 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5630 case IP_VERSION(11, 0, 1): 5631 case IP_VERSION(11, 0, 4): 5632 case IP_VERSION(11, 5, 0): 5633 case IP_VERSION(11, 5, 1): 5634 case IP_VERSION(11, 5, 2): 5635 case IP_VERSION(11, 5, 3): 5636 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); 5637 break; 5638 default: 5639 break; 5640 } 5641 } 5642 } 5643 5644 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable) 5645 { 5646 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 5647 5648 gfx_v11_cntl_power_gating(adev, enable); 5649 5650 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5651 } 5652 5653 static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block, 5654 enum amd_powergating_state state) 5655 { 5656 struct amdgpu_device *adev = ip_block->adev; 5657 bool enable = (state == AMD_PG_STATE_GATE); 5658 5659 if (amdgpu_sriov_vf(adev)) 5660 return 0; 5661 5662 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5663 case IP_VERSION(11, 0, 0): 5664 case IP_VERSION(11, 0, 2): 5665 case IP_VERSION(11, 0, 3): 5666 amdgpu_gfx_off_ctrl(adev, enable); 5667 break; 5668 case IP_VERSION(11, 0, 1): 5669 case IP_VERSION(11, 0, 4): 5670 case IP_VERSION(11, 5, 0): 5671 case IP_VERSION(11, 5, 1): 5672 case IP_VERSION(11, 5, 2): 5673 case IP_VERSION(11, 5, 3): 5674 if (!enable) 5675 amdgpu_gfx_off_ctrl(adev, false); 5676 5677 gfx_v11_cntl_pg(adev, enable); 5678 5679 if (enable) 5680 amdgpu_gfx_off_ctrl(adev, true); 5681 5682 break; 5683 default: 5684 break; 5685 } 5686 5687 return 0; 5688 } 5689 5690 static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, 5691 enum amd_clockgating_state state) 5692 { 5693 struct amdgpu_device *adev = ip_block->adev; 5694 5695 if (amdgpu_sriov_vf(adev)) 5696 return 0; 5697 5698 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5699 case IP_VERSION(11, 0, 0): 5700 case IP_VERSION(11, 0, 1): 5701 case IP_VERSION(11, 0, 2): 5702 case IP_VERSION(11, 0, 3): 5703 case IP_VERSION(11, 0, 4): 5704 case IP_VERSION(11, 5, 0): 5705 case IP_VERSION(11, 5, 1): 5706 case IP_VERSION(11, 5, 2): 5707 case IP_VERSION(11, 5, 3): 5708 gfx_v11_0_update_gfx_clock_gating(adev, 5709 state == AMD_CG_STATE_GATE); 5710 break; 5711 default: 5712 break; 5713 } 5714 5715 return 0; 5716 } 5717 5718 static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) 5719 { 5720 struct amdgpu_device *adev = ip_block->adev; 5721 int data; 5722 5723 /* AMD_CG_SUPPORT_GFX_MGCG */ 5724 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5725 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5726 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5727 5728 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5729 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5730 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5731 5732 /* AMD_CG_SUPPORT_GFX_FGCG */ 5733 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5734 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5735 5736 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5737 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5738 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5739 5740 /* AMD_CG_SUPPORT_GFX_CGCG */ 5741 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5742 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5743 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5744 5745 /* AMD_CG_SUPPORT_GFX_CGLS */ 5746 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5747 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5748 5749 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5750 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5751 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5752 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5753 5754 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5755 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5756 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 5757 } 5758 5759 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 5760 { 5761 /* gfx11 is 32bit rptr*/ 5762 return *(uint32_t *)ring->rptr_cpu_addr; 5763 } 5764 5765 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 5766 { 5767 struct amdgpu_device *adev = ring->adev; 5768 u64 wptr; 5769 5770 /* XXX check if swapping is necessary on BE */ 5771 if (ring->use_doorbell) { 5772 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5773 } else { 5774 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 5775 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 5776 } 5777 5778 return wptr; 5779 } 5780 5781 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 5782 { 5783 struct amdgpu_device *adev = ring->adev; 5784 5785 if (ring->use_doorbell) { 5786 /* XXX check if swapping is necessary on BE */ 5787 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5788 ring->wptr); 5789 WDOORBELL64(ring->doorbell_index, ring->wptr); 5790 } else { 5791 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 5792 lower_32_bits(ring->wptr)); 5793 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 5794 upper_32_bits(ring->wptr)); 5795 } 5796 } 5797 5798 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 5799 { 5800 /* gfx11 hardware is 32bit rptr */ 5801 return *(uint32_t *)ring->rptr_cpu_addr; 5802 } 5803 5804 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 5805 { 5806 u64 wptr; 5807 5808 /* XXX check if swapping is necessary on BE */ 5809 if (ring->use_doorbell) 5810 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5811 else 5812 BUG(); 5813 return wptr; 5814 } 5815 5816 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 5817 { 5818 struct amdgpu_device *adev = ring->adev; 5819 5820 /* XXX check if swapping is necessary on BE */ 5821 if (ring->use_doorbell) { 5822 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5823 ring->wptr); 5824 WDOORBELL64(ring->doorbell_index, ring->wptr); 5825 } else { 5826 BUG(); /* only DOORBELL method supported on gfx11 now */ 5827 } 5828 } 5829 5830 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 5831 { 5832 struct amdgpu_device *adev = ring->adev; 5833 u32 ref_and_mask, reg_mem_engine; 5834 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 5835 5836 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 5837 switch (ring->me) { 5838 case 1: 5839 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 5840 break; 5841 case 2: 5842 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 5843 break; 5844 default: 5845 return; 5846 } 5847 reg_mem_engine = 0; 5848 } else { 5849 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe; 5850 reg_mem_engine = 1; /* pfp */ 5851 } 5852 5853 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 5854 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 5855 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 5856 ref_and_mask, ref_and_mask, 0x20); 5857 } 5858 5859 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 5860 struct amdgpu_job *job, 5861 struct amdgpu_ib *ib, 5862 uint32_t flags) 5863 { 5864 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5865 u32 header, control = 0; 5866 5867 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5868 5869 control |= ib->length_dw | (vmid << 24); 5870 5871 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 5872 control |= INDIRECT_BUFFER_PRE_ENB(1); 5873 5874 if (flags & AMDGPU_IB_PREEMPTED) 5875 control |= INDIRECT_BUFFER_PRE_RESUME(1); 5876 5877 if (vmid) 5878 gfx_v11_0_ring_emit_de_meta(ring, 5879 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); 5880 } 5881 5882 amdgpu_ring_write(ring, header); 5883 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5884 amdgpu_ring_write(ring, 5885 #ifdef __BIG_ENDIAN 5886 (2 << 0) | 5887 #endif 5888 lower_32_bits(ib->gpu_addr)); 5889 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5890 amdgpu_ring_write(ring, control); 5891 } 5892 5893 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 5894 struct amdgpu_job *job, 5895 struct amdgpu_ib *ib, 5896 uint32_t flags) 5897 { 5898 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5899 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 5900 5901 /* Currently, there is a high possibility to get wave ID mismatch 5902 * between ME and GDS, leading to a hw deadlock, because ME generates 5903 * different wave IDs than the GDS expects. This situation happens 5904 * randomly when at least 5 compute pipes use GDS ordered append. 5905 * The wave IDs generated by ME are also wrong after suspend/resume. 5906 * Those are probably bugs somewhere else in the kernel driver. 5907 * 5908 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 5909 * GDS to 0 for this ring (me/pipe). 5910 */ 5911 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 5912 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 5913 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 5914 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 5915 } 5916 5917 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 5918 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5919 amdgpu_ring_write(ring, 5920 #ifdef __BIG_ENDIAN 5921 (2 << 0) | 5922 #endif 5923 lower_32_bits(ib->gpu_addr)); 5924 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5925 amdgpu_ring_write(ring, control); 5926 } 5927 5928 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 5929 u64 seq, unsigned flags) 5930 { 5931 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 5932 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 5933 5934 /* RELEASE_MEM - flush caches, send int */ 5935 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 5936 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 5937 PACKET3_RELEASE_MEM_GCR_GL2_WB | 5938 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */ 5939 PACKET3_RELEASE_MEM_GCR_GLM_WB | 5940 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 5941 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 5942 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 5943 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 5944 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 5945 5946 /* 5947 * the address should be Qword aligned if 64bit write, Dword 5948 * aligned if only send 32bit data low (discard data high) 5949 */ 5950 if (write64bit) 5951 BUG_ON(addr & 0x7); 5952 else 5953 BUG_ON(addr & 0x3); 5954 amdgpu_ring_write(ring, lower_32_bits(addr)); 5955 amdgpu_ring_write(ring, upper_32_bits(addr)); 5956 amdgpu_ring_write(ring, lower_32_bits(seq)); 5957 amdgpu_ring_write(ring, upper_32_bits(seq)); 5958 amdgpu_ring_write(ring, 0); 5959 } 5960 5961 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 5962 { 5963 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 5964 uint32_t seq = ring->fence_drv.sync_seq; 5965 uint64_t addr = ring->fence_drv.gpu_addr; 5966 5967 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 5968 upper_32_bits(addr), seq, 0xffffffff, 4); 5969 } 5970 5971 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 5972 uint16_t pasid, uint32_t flush_type, 5973 bool all_hub, uint8_t dst_sel) 5974 { 5975 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 5976 amdgpu_ring_write(ring, 5977 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 5978 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 5979 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 5980 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 5981 } 5982 5983 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 5984 unsigned vmid, uint64_t pd_addr) 5985 { 5986 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 5987 5988 /* compute doesn't have PFP */ 5989 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 5990 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5991 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5992 amdgpu_ring_write(ring, 0x0); 5993 } 5994 5995 /* Make sure that we can't skip the SET_Q_MODE packets when the VM 5996 * changed in any way. 5997 */ 5998 ring->set_q_mode_offs = 0; 5999 ring->set_q_mode_ptr = NULL; 6000 } 6001 6002 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 6003 u64 seq, unsigned int flags) 6004 { 6005 struct amdgpu_device *adev = ring->adev; 6006 6007 /* we only allocate 32bit for each seq wb address */ 6008 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 6009 6010 /* write fence seq to the "addr" */ 6011 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6012 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 6013 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 6014 amdgpu_ring_write(ring, lower_32_bits(addr)); 6015 amdgpu_ring_write(ring, upper_32_bits(addr)); 6016 amdgpu_ring_write(ring, lower_32_bits(seq)); 6017 6018 if (flags & AMDGPU_FENCE_FLAG_INT) { 6019 /* set register to trigger INT */ 6020 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6021 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 6022 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 6023 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 6024 amdgpu_ring_write(ring, 0); 6025 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 6026 } 6027 } 6028 6029 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 6030 uint32_t flags) 6031 { 6032 uint32_t dw2 = 0; 6033 6034 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 6035 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 6036 /* set load_global_config & load_global_uconfig */ 6037 dw2 |= 0x8001; 6038 /* set load_cs_sh_regs */ 6039 dw2 |= 0x01000000; 6040 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 6041 dw2 |= 0x10002; 6042 } 6043 6044 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 6045 amdgpu_ring_write(ring, dw2); 6046 amdgpu_ring_write(ring, 0); 6047 } 6048 6049 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring, 6050 uint64_t addr) 6051 { 6052 unsigned ret; 6053 6054 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 6055 amdgpu_ring_write(ring, lower_32_bits(addr)); 6056 amdgpu_ring_write(ring, upper_32_bits(addr)); 6057 /* discard following DWs if *cond_exec_gpu_addr==0 */ 6058 amdgpu_ring_write(ring, 0); 6059 ret = ring->wptr & ring->buf_mask; 6060 /* patch dummy value later */ 6061 amdgpu_ring_write(ring, 0); 6062 6063 return ret; 6064 } 6065 6066 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring, 6067 u64 shadow_va, u64 csa_va, 6068 u64 gds_va, bool init_shadow, 6069 int vmid) 6070 { 6071 struct amdgpu_device *adev = ring->adev; 6072 unsigned int offs, end; 6073 6074 if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj) 6075 return; 6076 6077 /* 6078 * The logic here isn't easy to understand because we need to keep state 6079 * accross multiple executions of the function as well as between the 6080 * CPU and GPU. The general idea is that the newly written GPU command 6081 * has a condition on the previous one and only executed if really 6082 * necessary. 6083 */ 6084 6085 /* 6086 * The dw in the NOP controls if the next SET_Q_MODE packet should be 6087 * executed or not. Reserve 64bits just to be on the save side. 6088 */ 6089 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1)); 6090 offs = ring->wptr & ring->buf_mask; 6091 6092 /* 6093 * We start with skipping the prefix SET_Q_MODE and always executing 6094 * the postfix SET_Q_MODE packet. This is changed below with a 6095 * WRITE_DATA command when the postfix executed. 6096 */ 6097 amdgpu_ring_write(ring, shadow_va ? 1 : 0); 6098 amdgpu_ring_write(ring, 0); 6099 6100 if (ring->set_q_mode_offs) { 6101 uint64_t addr; 6102 6103 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 6104 addr += ring->set_q_mode_offs << 2; 6105 end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr); 6106 } 6107 6108 /* 6109 * When the postfix SET_Q_MODE packet executes we need to make sure that the 6110 * next prefix SET_Q_MODE packet executes as well. 6111 */ 6112 if (!shadow_va) { 6113 uint64_t addr; 6114 6115 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 6116 addr += offs << 2; 6117 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6118 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); 6119 amdgpu_ring_write(ring, lower_32_bits(addr)); 6120 amdgpu_ring_write(ring, upper_32_bits(addr)); 6121 amdgpu_ring_write(ring, 0x1); 6122 } 6123 6124 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7)); 6125 amdgpu_ring_write(ring, lower_32_bits(shadow_va)); 6126 amdgpu_ring_write(ring, upper_32_bits(shadow_va)); 6127 amdgpu_ring_write(ring, lower_32_bits(gds_va)); 6128 amdgpu_ring_write(ring, upper_32_bits(gds_va)); 6129 amdgpu_ring_write(ring, lower_32_bits(csa_va)); 6130 amdgpu_ring_write(ring, upper_32_bits(csa_va)); 6131 amdgpu_ring_write(ring, shadow_va ? 6132 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0); 6133 amdgpu_ring_write(ring, init_shadow ? 6134 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0); 6135 6136 if (ring->set_q_mode_offs) 6137 amdgpu_ring_patch_cond_exec(ring, end); 6138 6139 if (shadow_va) { 6140 uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid; 6141 6142 /* 6143 * If the tokens match try to skip the last postfix SET_Q_MODE 6144 * packet to avoid saving/restoring the state all the time. 6145 */ 6146 if (ring->set_q_mode_ptr && ring->set_q_mode_token == token) 6147 *ring->set_q_mode_ptr = 0; 6148 6149 ring->set_q_mode_token = token; 6150 } else { 6151 ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs]; 6152 } 6153 6154 ring->set_q_mode_offs = offs; 6155 } 6156 6157 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) 6158 { 6159 int i, r = 0; 6160 struct amdgpu_device *adev = ring->adev; 6161 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 6162 struct amdgpu_ring *kiq_ring = &kiq->ring; 6163 unsigned long flags; 6164 6165 if (adev->enable_mes) 6166 return -EINVAL; 6167 6168 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 6169 return -EINVAL; 6170 6171 spin_lock_irqsave(&kiq->ring_lock, flags); 6172 6173 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 6174 spin_unlock_irqrestore(&kiq->ring_lock, flags); 6175 return -ENOMEM; 6176 } 6177 6178 /* assert preemption condition */ 6179 amdgpu_ring_set_preempt_cond_exec(ring, false); 6180 6181 /* assert IB preemption, emit the trailing fence */ 6182 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 6183 ring->trail_fence_gpu_addr, 6184 ++ring->trail_seq); 6185 amdgpu_ring_commit(kiq_ring); 6186 6187 spin_unlock_irqrestore(&kiq->ring_lock, flags); 6188 6189 /* poll the trailing fence */ 6190 for (i = 0; i < adev->usec_timeout; i++) { 6191 if (ring->trail_seq == 6192 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 6193 break; 6194 udelay(1); 6195 } 6196 6197 if (i >= adev->usec_timeout) { 6198 r = -EINVAL; 6199 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 6200 } 6201 6202 /* deassert preemption condition */ 6203 amdgpu_ring_set_preempt_cond_exec(ring, true); 6204 return r; 6205 } 6206 6207 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 6208 { 6209 struct amdgpu_device *adev = ring->adev; 6210 struct v10_de_ib_state de_payload = {0}; 6211 uint64_t offset, gds_addr, de_payload_gpu_addr; 6212 void *de_payload_cpu_addr; 6213 int cnt; 6214 6215 offset = offsetof(struct v10_gfx_meta_data, de_payload); 6216 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; 6217 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; 6218 6219 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + 6220 AMDGPU_CSA_SIZE - adev->gds.gds_size, 6221 PAGE_SIZE); 6222 6223 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 6224 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 6225 6226 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 6227 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 6228 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 6229 WRITE_DATA_DST_SEL(8) | 6230 WR_CONFIRM) | 6231 WRITE_DATA_CACHE_POLICY(0)); 6232 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); 6233 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); 6234 6235 if (resume) 6236 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, 6237 sizeof(de_payload) >> 2); 6238 else 6239 amdgpu_ring_write_multiple(ring, (void *)&de_payload, 6240 sizeof(de_payload) >> 2); 6241 } 6242 6243 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, 6244 bool secure) 6245 { 6246 uint32_t v = secure ? FRAME_TMZ : 0; 6247 6248 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 6249 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 6250 } 6251 6252 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 6253 uint32_t reg_val_offs) 6254 { 6255 struct amdgpu_device *adev = ring->adev; 6256 6257 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 6258 amdgpu_ring_write(ring, 0 | /* src: register*/ 6259 (5 << 8) | /* dst: memory */ 6260 (1 << 20)); /* write confirm */ 6261 amdgpu_ring_write(ring, reg); 6262 amdgpu_ring_write(ring, 0); 6263 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 6264 reg_val_offs * 4)); 6265 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 6266 reg_val_offs * 4)); 6267 } 6268 6269 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 6270 uint32_t val) 6271 { 6272 uint32_t cmd = 0; 6273 6274 switch (ring->funcs->type) { 6275 case AMDGPU_RING_TYPE_GFX: 6276 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 6277 break; 6278 case AMDGPU_RING_TYPE_KIQ: 6279 cmd = (1 << 16); /* no inc addr */ 6280 break; 6281 default: 6282 cmd = WR_CONFIRM; 6283 break; 6284 } 6285 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6286 amdgpu_ring_write(ring, cmd); 6287 amdgpu_ring_write(ring, reg); 6288 amdgpu_ring_write(ring, 0); 6289 amdgpu_ring_write(ring, val); 6290 } 6291 6292 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 6293 uint32_t val, uint32_t mask) 6294 { 6295 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 6296 } 6297 6298 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 6299 uint32_t reg0, uint32_t reg1, 6300 uint32_t ref, uint32_t mask) 6301 { 6302 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 6303 6304 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 6305 ref, mask, 0x20); 6306 } 6307 6308 static void 6309 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 6310 uint32_t me, uint32_t pipe, 6311 enum amdgpu_interrupt_state state) 6312 { 6313 uint32_t cp_int_cntl, cp_int_cntl_reg; 6314 6315 if (!me) { 6316 switch (pipe) { 6317 case 0: 6318 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 6319 break; 6320 case 1: 6321 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 6322 break; 6323 default: 6324 DRM_DEBUG("invalid pipe %d\n", pipe); 6325 return; 6326 } 6327 } else { 6328 DRM_DEBUG("invalid me %d\n", me); 6329 return; 6330 } 6331 6332 switch (state) { 6333 case AMDGPU_IRQ_STATE_DISABLE: 6334 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6335 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6336 TIME_STAMP_INT_ENABLE, 0); 6337 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6338 GENERIC0_INT_ENABLE, 0); 6339 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6340 break; 6341 case AMDGPU_IRQ_STATE_ENABLE: 6342 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6343 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6344 TIME_STAMP_INT_ENABLE, 1); 6345 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6346 GENERIC0_INT_ENABLE, 1); 6347 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6348 break; 6349 default: 6350 break; 6351 } 6352 } 6353 6354 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 6355 int me, int pipe, 6356 enum amdgpu_interrupt_state state) 6357 { 6358 u32 mec_int_cntl, mec_int_cntl_reg; 6359 6360 /* 6361 * amdgpu controls only the first MEC. That's why this function only 6362 * handles the setting of interrupts for this specific MEC. All other 6363 * pipes' interrupts are set by amdkfd. 6364 */ 6365 6366 if (me == 1) { 6367 switch (pipe) { 6368 case 0: 6369 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6370 break; 6371 case 1: 6372 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 6373 break; 6374 case 2: 6375 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 6376 break; 6377 case 3: 6378 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 6379 break; 6380 default: 6381 DRM_DEBUG("invalid pipe %d\n", pipe); 6382 return; 6383 } 6384 } else { 6385 DRM_DEBUG("invalid me %d\n", me); 6386 return; 6387 } 6388 6389 switch (state) { 6390 case AMDGPU_IRQ_STATE_DISABLE: 6391 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6392 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6393 TIME_STAMP_INT_ENABLE, 0); 6394 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6395 GENERIC0_INT_ENABLE, 0); 6396 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6397 break; 6398 case AMDGPU_IRQ_STATE_ENABLE: 6399 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6400 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6401 TIME_STAMP_INT_ENABLE, 1); 6402 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6403 GENERIC0_INT_ENABLE, 1); 6404 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6405 break; 6406 default: 6407 break; 6408 } 6409 } 6410 6411 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, 6412 struct amdgpu_irq_src *src, 6413 unsigned type, 6414 enum amdgpu_interrupt_state state) 6415 { 6416 switch (type) { 6417 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 6418 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 6419 break; 6420 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 6421 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 6422 break; 6423 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 6424 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 6425 break; 6426 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 6427 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 6428 break; 6429 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 6430 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 6431 break; 6432 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 6433 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 6434 break; 6435 default: 6436 break; 6437 } 6438 return 0; 6439 } 6440 6441 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, 6442 struct amdgpu_irq_src *source, 6443 struct amdgpu_iv_entry *entry) 6444 { 6445 u32 doorbell_offset = entry->src_data[0]; 6446 u8 me_id, pipe_id, queue_id; 6447 struct amdgpu_ring *ring; 6448 int i; 6449 6450 DRM_DEBUG("IH: CP EOP\n"); 6451 6452 if (adev->enable_mes && doorbell_offset) { 6453 struct amdgpu_userq_fence_driver *fence_drv = NULL; 6454 struct xarray *xa = &adev->userq_xa; 6455 unsigned long flags; 6456 6457 xa_lock_irqsave(xa, flags); 6458 fence_drv = xa_load(xa, doorbell_offset); 6459 if (fence_drv) 6460 amdgpu_userq_fence_driver_process(fence_drv); 6461 xa_unlock_irqrestore(xa, flags); 6462 } else { 6463 me_id = (entry->ring_id & 0x0c) >> 2; 6464 pipe_id = (entry->ring_id & 0x03) >> 0; 6465 queue_id = (entry->ring_id & 0x70) >> 4; 6466 6467 switch (me_id) { 6468 case 0: 6469 if (pipe_id == 0) 6470 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 6471 else 6472 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 6473 break; 6474 case 1: 6475 case 2: 6476 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6477 ring = &adev->gfx.compute_ring[i]; 6478 /* Per-queue interrupt is supported for MEC starting from VI. 6479 * The interrupt can only be enabled/disabled per pipe instead 6480 * of per queue. 6481 */ 6482 if ((ring->me == me_id) && 6483 (ring->pipe == pipe_id) && 6484 (ring->queue == queue_id)) 6485 amdgpu_fence_process(ring); 6486 } 6487 break; 6488 } 6489 } 6490 6491 return 0; 6492 } 6493 6494 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 6495 struct amdgpu_irq_src *source, 6496 unsigned int type, 6497 enum amdgpu_interrupt_state state) 6498 { 6499 u32 cp_int_cntl_reg, cp_int_cntl; 6500 int i, j; 6501 6502 switch (state) { 6503 case AMDGPU_IRQ_STATE_DISABLE: 6504 case AMDGPU_IRQ_STATE_ENABLE: 6505 for (i = 0; i < adev->gfx.me.num_me; i++) { 6506 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6507 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6508 6509 if (cp_int_cntl_reg) { 6510 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6511 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6512 PRIV_REG_INT_ENABLE, 6513 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6514 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6515 } 6516 } 6517 } 6518 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6519 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6520 /* MECs start at 1 */ 6521 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6522 6523 if (cp_int_cntl_reg) { 6524 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6525 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6526 PRIV_REG_INT_ENABLE, 6527 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6528 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6529 } 6530 } 6531 } 6532 break; 6533 default: 6534 break; 6535 } 6536 6537 return 0; 6538 } 6539 6540 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev, 6541 struct amdgpu_irq_src *source, 6542 unsigned type, 6543 enum amdgpu_interrupt_state state) 6544 { 6545 u32 cp_int_cntl_reg, cp_int_cntl; 6546 int i, j; 6547 6548 switch (state) { 6549 case AMDGPU_IRQ_STATE_DISABLE: 6550 case AMDGPU_IRQ_STATE_ENABLE: 6551 for (i = 0; i < adev->gfx.me.num_me; i++) { 6552 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6553 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6554 6555 if (cp_int_cntl_reg) { 6556 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6557 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6558 OPCODE_ERROR_INT_ENABLE, 6559 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6560 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6561 } 6562 } 6563 } 6564 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6565 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6566 /* MECs start at 1 */ 6567 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6568 6569 if (cp_int_cntl_reg) { 6570 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6571 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6572 OPCODE_ERROR_INT_ENABLE, 6573 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6574 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6575 } 6576 } 6577 } 6578 break; 6579 default: 6580 break; 6581 } 6582 return 0; 6583 } 6584 6585 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 6586 struct amdgpu_irq_src *source, 6587 unsigned int type, 6588 enum amdgpu_interrupt_state state) 6589 { 6590 u32 cp_int_cntl_reg, cp_int_cntl; 6591 int i, j; 6592 6593 switch (state) { 6594 case AMDGPU_IRQ_STATE_DISABLE: 6595 case AMDGPU_IRQ_STATE_ENABLE: 6596 for (i = 0; i < adev->gfx.me.num_me; i++) { 6597 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6598 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6599 6600 if (cp_int_cntl_reg) { 6601 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6602 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6603 PRIV_INSTR_INT_ENABLE, 6604 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6605 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6606 } 6607 } 6608 } 6609 break; 6610 default: 6611 break; 6612 } 6613 6614 return 0; 6615 } 6616 6617 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, 6618 struct amdgpu_iv_entry *entry) 6619 { 6620 u8 me_id, pipe_id, queue_id; 6621 struct amdgpu_ring *ring; 6622 int i; 6623 6624 me_id = (entry->ring_id & 0x0c) >> 2; 6625 pipe_id = (entry->ring_id & 0x03) >> 0; 6626 queue_id = (entry->ring_id & 0x70) >> 4; 6627 6628 if (!adev->gfx.disable_kq) { 6629 switch (me_id) { 6630 case 0: 6631 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6632 ring = &adev->gfx.gfx_ring[i]; 6633 if (ring->me == me_id && ring->pipe == pipe_id && 6634 ring->queue == queue_id) 6635 drm_sched_fault(&ring->sched); 6636 } 6637 break; 6638 case 1: 6639 case 2: 6640 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6641 ring = &adev->gfx.compute_ring[i]; 6642 if (ring->me == me_id && ring->pipe == pipe_id && 6643 ring->queue == queue_id) 6644 drm_sched_fault(&ring->sched); 6645 } 6646 break; 6647 default: 6648 BUG(); 6649 break; 6650 } 6651 } 6652 } 6653 6654 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, 6655 struct amdgpu_irq_src *source, 6656 struct amdgpu_iv_entry *entry) 6657 { 6658 DRM_ERROR("Illegal register access in command stream\n"); 6659 gfx_v11_0_handle_priv_fault(adev, entry); 6660 return 0; 6661 } 6662 6663 static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev, 6664 struct amdgpu_irq_src *source, 6665 struct amdgpu_iv_entry *entry) 6666 { 6667 DRM_ERROR("Illegal opcode in command stream \n"); 6668 gfx_v11_0_handle_priv_fault(adev, entry); 6669 return 0; 6670 } 6671 6672 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, 6673 struct amdgpu_irq_src *source, 6674 struct amdgpu_iv_entry *entry) 6675 { 6676 DRM_ERROR("Illegal instruction in command stream\n"); 6677 gfx_v11_0_handle_priv_fault(adev, entry); 6678 return 0; 6679 } 6680 6681 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev, 6682 struct amdgpu_irq_src *source, 6683 struct amdgpu_iv_entry *entry) 6684 { 6685 if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq) 6686 return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry); 6687 6688 return 0; 6689 } 6690 6691 #if 0 6692 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 6693 struct amdgpu_irq_src *src, 6694 unsigned int type, 6695 enum amdgpu_interrupt_state state) 6696 { 6697 uint32_t tmp, target; 6698 struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); 6699 6700 target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6701 target += ring->pipe; 6702 6703 switch (type) { 6704 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 6705 if (state == AMDGPU_IRQ_STATE_DISABLE) { 6706 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6707 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6708 GENERIC2_INT_ENABLE, 0); 6709 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6710 6711 tmp = RREG32_SOC15_IP(GC, target); 6712 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6713 GENERIC2_INT_ENABLE, 0); 6714 WREG32_SOC15_IP(GC, target, tmp); 6715 } else { 6716 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6717 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6718 GENERIC2_INT_ENABLE, 1); 6719 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6720 6721 tmp = RREG32_SOC15_IP(GC, target); 6722 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6723 GENERIC2_INT_ENABLE, 1); 6724 WREG32_SOC15_IP(GC, target, tmp); 6725 } 6726 break; 6727 default: 6728 BUG(); /* kiq only support GENERIC2_INT now */ 6729 break; 6730 } 6731 return 0; 6732 } 6733 #endif 6734 6735 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) 6736 { 6737 const unsigned int gcr_cntl = 6738 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 6739 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 6740 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 6741 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 6742 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 6743 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 6744 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 6745 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 6746 6747 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 6748 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 6749 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 6750 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 6751 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 6752 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 6753 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 6754 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 6755 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 6756 } 6757 6758 static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev) 6759 { 6760 /* Disable the pipe reset until the CPFW fully support it.*/ 6761 dev_warn_once(adev->dev, "The CPFW hasn't support pipe reset yet.\n"); 6762 return false; 6763 } 6764 6765 6766 static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring) 6767 { 6768 struct amdgpu_device *adev = ring->adev; 6769 uint32_t reset_pipe = 0, clean_pipe = 0; 6770 int r; 6771 6772 if (!gfx_v11_pipe_reset_support(adev)) 6773 return -EOPNOTSUPP; 6774 6775 gfx_v11_0_set_safe_mode(adev, 0); 6776 mutex_lock(&adev->srbm_mutex); 6777 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 6778 6779 switch (ring->pipe) { 6780 case 0: 6781 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6782 PFP_PIPE0_RESET, 1); 6783 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6784 ME_PIPE0_RESET, 1); 6785 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6786 PFP_PIPE0_RESET, 0); 6787 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6788 ME_PIPE0_RESET, 0); 6789 break; 6790 case 1: 6791 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6792 PFP_PIPE1_RESET, 1); 6793 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6794 ME_PIPE1_RESET, 1); 6795 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6796 PFP_PIPE1_RESET, 0); 6797 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6798 ME_PIPE1_RESET, 0); 6799 break; 6800 default: 6801 break; 6802 } 6803 6804 WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe); 6805 WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe); 6806 6807 r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) - 6808 RS64_FW_UC_START_ADDR_LO; 6809 soc21_grbm_select(adev, 0, 0, 0, 0); 6810 mutex_unlock(&adev->srbm_mutex); 6811 gfx_v11_0_unset_safe_mode(adev, 0); 6812 6813 dev_info(adev->dev, "The ring %s pipe reset to the ME firmware start PC: %s\n", ring->name, 6814 r == 0 ? "successfully" : "failed"); 6815 /* FIXME: Sometimes driver can't cache the ME firmware start PC correctly, 6816 * so the pipe reset status relies on the later gfx ring test result. 6817 */ 6818 return 0; 6819 } 6820 6821 static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, 6822 unsigned int vmid, 6823 struct amdgpu_fence *timedout_fence) 6824 { 6825 struct amdgpu_device *adev = ring->adev; 6826 int r; 6827 6828 amdgpu_ring_reset_helper_begin(ring, timedout_fence); 6829 6830 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); 6831 if (r) { 6832 6833 dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); 6834 r = gfx_v11_reset_gfx_pipe(ring); 6835 if (r) 6836 return r; 6837 } 6838 6839 r = gfx_v11_0_kgq_init_queue(ring, true); 6840 if (r) { 6841 dev_err(adev->dev, "failed to init kgq\n"); 6842 return r; 6843 } 6844 6845 r = amdgpu_mes_map_legacy_queue(adev, ring); 6846 if (r) { 6847 dev_err(adev->dev, "failed to remap kgq\n"); 6848 return r; 6849 } 6850 6851 return amdgpu_ring_reset_helper_end(ring, timedout_fence); 6852 } 6853 6854 static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring) 6855 { 6856 6857 struct amdgpu_device *adev = ring->adev; 6858 uint32_t reset_pipe = 0, clean_pipe = 0; 6859 int r; 6860 6861 if (!gfx_v11_pipe_reset_support(adev)) 6862 return -EOPNOTSUPP; 6863 6864 gfx_v11_0_set_safe_mode(adev, 0); 6865 mutex_lock(&adev->srbm_mutex); 6866 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 6867 6868 reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 6869 clean_pipe = reset_pipe; 6870 6871 if (adev->gfx.rs64_enable) { 6872 6873 switch (ring->pipe) { 6874 case 0: 6875 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, 6876 MEC_PIPE0_RESET, 1); 6877 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, 6878 MEC_PIPE0_RESET, 0); 6879 break; 6880 case 1: 6881 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, 6882 MEC_PIPE1_RESET, 1); 6883 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, 6884 MEC_PIPE1_RESET, 0); 6885 break; 6886 case 2: 6887 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, 6888 MEC_PIPE2_RESET, 1); 6889 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, 6890 MEC_PIPE2_RESET, 0); 6891 break; 6892 case 3: 6893 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, 6894 MEC_PIPE3_RESET, 1); 6895 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, 6896 MEC_PIPE3_RESET, 0); 6897 break; 6898 default: 6899 break; 6900 } 6901 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe); 6902 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe); 6903 r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) - 6904 RS64_FW_UC_START_ADDR_LO; 6905 } else { 6906 if (ring->me == 1) { 6907 switch (ring->pipe) { 6908 case 0: 6909 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6910 MEC_ME1_PIPE0_RESET, 1); 6911 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6912 MEC_ME1_PIPE0_RESET, 0); 6913 break; 6914 case 1: 6915 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6916 MEC_ME1_PIPE1_RESET, 1); 6917 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6918 MEC_ME1_PIPE1_RESET, 0); 6919 break; 6920 case 2: 6921 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6922 MEC_ME1_PIPE2_RESET, 1); 6923 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6924 MEC_ME1_PIPE2_RESET, 0); 6925 break; 6926 case 3: 6927 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6928 MEC_ME1_PIPE3_RESET, 1); 6929 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6930 MEC_ME1_PIPE3_RESET, 0); 6931 break; 6932 default: 6933 break; 6934 } 6935 /* mec1 fw pc: CP_MEC1_INSTR_PNTR */ 6936 } else { 6937 switch (ring->pipe) { 6938 case 0: 6939 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6940 MEC_ME2_PIPE0_RESET, 1); 6941 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6942 MEC_ME2_PIPE0_RESET, 0); 6943 break; 6944 case 1: 6945 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6946 MEC_ME2_PIPE1_RESET, 1); 6947 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6948 MEC_ME2_PIPE1_RESET, 0); 6949 break; 6950 case 2: 6951 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6952 MEC_ME2_PIPE2_RESET, 1); 6953 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6954 MEC_ME2_PIPE2_RESET, 0); 6955 break; 6956 case 3: 6957 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 6958 MEC_ME2_PIPE3_RESET, 1); 6959 clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, 6960 MEC_ME2_PIPE3_RESET, 0); 6961 break; 6962 default: 6963 break; 6964 } 6965 /* mec2 fw pc: CP:CP_MEC2_INSTR_PNTR */ 6966 } 6967 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe); 6968 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe); 6969 r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR)); 6970 } 6971 6972 soc21_grbm_select(adev, 0, 0, 0, 0); 6973 mutex_unlock(&adev->srbm_mutex); 6974 gfx_v11_0_unset_safe_mode(adev, 0); 6975 6976 dev_info(adev->dev, "The ring %s pipe resets to MEC FW start PC: %s\n", ring->name, 6977 r == 0 ? "successfully" : "failed"); 6978 /*FIXME:Sometimes driver can't cache the MEC firmware start PC correctly, so the pipe 6979 * reset status relies on the compute ring test result. 6980 */ 6981 return 0; 6982 } 6983 6984 static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, 6985 unsigned int vmid, 6986 struct amdgpu_fence *timedout_fence) 6987 { 6988 struct amdgpu_device *adev = ring->adev; 6989 int r = 0; 6990 6991 amdgpu_ring_reset_helper_begin(ring, timedout_fence); 6992 6993 r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); 6994 if (r) { 6995 dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r); 6996 r = gfx_v11_0_reset_compute_pipe(ring); 6997 if (r) 6998 return r; 6999 } 7000 7001 r = gfx_v11_0_kcq_init_queue(ring, true); 7002 if (r) { 7003 dev_err(adev->dev, "fail to init kcq\n"); 7004 return r; 7005 } 7006 r = amdgpu_mes_map_legacy_queue(adev, ring); 7007 if (r) { 7008 dev_err(adev->dev, "failed to remap kcq\n"); 7009 return r; 7010 } 7011 7012 return amdgpu_ring_reset_helper_end(ring, timedout_fence); 7013 } 7014 7015 static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) 7016 { 7017 struct amdgpu_device *adev = ip_block->adev; 7018 uint32_t i, j, k, reg, index = 0; 7019 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); 7020 7021 if (!adev->gfx.ip_dump_core) 7022 return; 7023 7024 for (i = 0; i < reg_count; i++) 7025 drm_printf(p, "%-50s \t 0x%08x\n", 7026 gc_reg_list_11_0[i].reg_name, 7027 adev->gfx.ip_dump_core[i]); 7028 7029 /* print compute queue registers for all instances */ 7030 if (!adev->gfx.ip_dump_compute_queues) 7031 return; 7032 7033 reg_count = ARRAY_SIZE(gc_cp_reg_list_11); 7034 drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n", 7035 adev->gfx.mec.num_mec, 7036 adev->gfx.mec.num_pipe_per_mec, 7037 adev->gfx.mec.num_queue_per_pipe); 7038 7039 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 7040 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 7041 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 7042 drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k); 7043 for (reg = 0; reg < reg_count; reg++) { 7044 if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP) 7045 drm_printf(p, "%-50s \t 0x%08x\n", 7046 "regCP_MEC_ME2_HEADER_DUMP", 7047 adev->gfx.ip_dump_compute_queues[index + reg]); 7048 else 7049 drm_printf(p, "%-50s \t 0x%08x\n", 7050 gc_cp_reg_list_11[reg].reg_name, 7051 adev->gfx.ip_dump_compute_queues[index + reg]); 7052 } 7053 index += reg_count; 7054 } 7055 } 7056 } 7057 7058 /* print gfx queue registers for all instances */ 7059 if (!adev->gfx.ip_dump_gfx_queues) 7060 return; 7061 7062 index = 0; 7063 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 7064 drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n", 7065 adev->gfx.me.num_me, 7066 adev->gfx.me.num_pipe_per_me, 7067 adev->gfx.me.num_queue_per_pipe); 7068 7069 for (i = 0; i < adev->gfx.me.num_me; i++) { 7070 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 7071 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 7072 drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k); 7073 for (reg = 0; reg < reg_count; reg++) { 7074 drm_printf(p, "%-50s \t 0x%08x\n", 7075 gc_gfx_queue_reg_list_11[reg].reg_name, 7076 adev->gfx.ip_dump_gfx_queues[index + reg]); 7077 } 7078 index += reg_count; 7079 } 7080 } 7081 } 7082 } 7083 7084 static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block) 7085 { 7086 struct amdgpu_device *adev = ip_block->adev; 7087 uint32_t i, j, k, reg, index = 0; 7088 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); 7089 7090 if (!adev->gfx.ip_dump_core) 7091 return; 7092 7093 amdgpu_gfx_off_ctrl(adev, false); 7094 for (i = 0; i < reg_count; i++) 7095 adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i])); 7096 amdgpu_gfx_off_ctrl(adev, true); 7097 7098 /* dump compute queue registers for all instances */ 7099 if (!adev->gfx.ip_dump_compute_queues) 7100 return; 7101 7102 reg_count = ARRAY_SIZE(gc_cp_reg_list_11); 7103 amdgpu_gfx_off_ctrl(adev, false); 7104 mutex_lock(&adev->srbm_mutex); 7105 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 7106 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 7107 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 7108 /* ME0 is for GFX so start from 1 for CP */ 7109 soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); 7110 for (reg = 0; reg < reg_count; reg++) { 7111 if (i && 7112 gc_cp_reg_list_11[reg].reg_offset == 7113 regCP_MEC_ME1_HEADER_DUMP) 7114 adev->gfx.ip_dump_compute_queues[index + reg] = 7115 RREG32(SOC15_REG_OFFSET(GC, 0, 7116 regCP_MEC_ME2_HEADER_DUMP)); 7117 else 7118 adev->gfx.ip_dump_compute_queues[index + reg] = 7119 RREG32(SOC15_REG_ENTRY_OFFSET( 7120 gc_cp_reg_list_11[reg])); 7121 } 7122 index += reg_count; 7123 } 7124 } 7125 } 7126 soc21_grbm_select(adev, 0, 0, 0, 0); 7127 mutex_unlock(&adev->srbm_mutex); 7128 amdgpu_gfx_off_ctrl(adev, true); 7129 7130 /* dump gfx queue registers for all instances */ 7131 if (!adev->gfx.ip_dump_gfx_queues) 7132 return; 7133 7134 index = 0; 7135 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 7136 amdgpu_gfx_off_ctrl(adev, false); 7137 mutex_lock(&adev->srbm_mutex); 7138 for (i = 0; i < adev->gfx.me.num_me; i++) { 7139 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 7140 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 7141 soc21_grbm_select(adev, i, j, k, 0); 7142 7143 for (reg = 0; reg < reg_count; reg++) { 7144 adev->gfx.ip_dump_gfx_queues[index + reg] = 7145 RREG32(SOC15_REG_ENTRY_OFFSET( 7146 gc_gfx_queue_reg_list_11[reg])); 7147 } 7148 index += reg_count; 7149 } 7150 } 7151 } 7152 soc21_grbm_select(adev, 0, 0, 0, 0); 7153 mutex_unlock(&adev->srbm_mutex); 7154 amdgpu_gfx_off_ctrl(adev, true); 7155 } 7156 7157 static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 7158 { 7159 /* Emit the cleaner shader */ 7160 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 7161 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 7162 } 7163 7164 static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring) 7165 { 7166 amdgpu_gfx_profile_ring_begin_use(ring); 7167 7168 amdgpu_gfx_enforce_isolation_ring_begin_use(ring); 7169 } 7170 7171 static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring) 7172 { 7173 amdgpu_gfx_profile_ring_end_use(ring); 7174 7175 amdgpu_gfx_enforce_isolation_ring_end_use(ring); 7176 } 7177 7178 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { 7179 .name = "gfx_v11_0", 7180 .early_init = gfx_v11_0_early_init, 7181 .late_init = gfx_v11_0_late_init, 7182 .sw_init = gfx_v11_0_sw_init, 7183 .sw_fini = gfx_v11_0_sw_fini, 7184 .hw_init = gfx_v11_0_hw_init, 7185 .hw_fini = gfx_v11_0_hw_fini, 7186 .suspend = gfx_v11_0_suspend, 7187 .resume = gfx_v11_0_resume, 7188 .is_idle = gfx_v11_0_is_idle, 7189 .wait_for_idle = gfx_v11_0_wait_for_idle, 7190 .soft_reset = gfx_v11_0_soft_reset, 7191 .check_soft_reset = gfx_v11_0_check_soft_reset, 7192 .post_soft_reset = gfx_v11_0_post_soft_reset, 7193 .set_clockgating_state = gfx_v11_0_set_clockgating_state, 7194 .set_powergating_state = gfx_v11_0_set_powergating_state, 7195 .get_clockgating_state = gfx_v11_0_get_clockgating_state, 7196 .dump_ip_state = gfx_v11_ip_dump, 7197 .print_ip_state = gfx_v11_ip_print, 7198 }; 7199 7200 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 7201 .type = AMDGPU_RING_TYPE_GFX, 7202 .align_mask = 0xff, 7203 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 7204 .support_64bit_ptrs = true, 7205 .secure_submission_supported = true, 7206 .get_rptr = gfx_v11_0_ring_get_rptr_gfx, 7207 .get_wptr = gfx_v11_0_ring_get_wptr_gfx, 7208 .set_wptr = gfx_v11_0_ring_set_wptr_gfx, 7209 .emit_frame_size = /* totally 247 maximum if 16 IBs */ 7210 5 + /* update_spm_vmid */ 7211 5 + /* COND_EXEC */ 7212 22 + /* SET_Q_PREEMPTION_MODE */ 7213 7 + /* PIPELINE_SYNC */ 7214 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 7215 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 7216 4 + /* VM_FLUSH */ 7217 8 + /* FENCE for VM_FLUSH */ 7218 20 + /* GDS switch */ 7219 5 + /* COND_EXEC */ 7220 7 + /* HDP_flush */ 7221 4 + /* VGT_flush */ 7222 31 + /* DE_META */ 7223 3 + /* CNTX_CTRL */ 7224 5 + /* HDP_INVL */ 7225 22 + /* SET_Q_PREEMPTION_MODE */ 7226 8 + 8 + /* FENCE x2 */ 7227 8 + /* gfx_v11_0_emit_mem_sync */ 7228 2, /* gfx_v11_0_ring_emit_cleaner_shader */ 7229 .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ 7230 .emit_ib = gfx_v11_0_ring_emit_ib_gfx, 7231 .emit_fence = gfx_v11_0_ring_emit_fence, 7232 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 7233 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 7234 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 7235 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 7236 .test_ring = gfx_v11_0_ring_test_ring, 7237 .test_ib = gfx_v11_0_ring_test_ib, 7238 .insert_nop = gfx_v11_ring_insert_nop, 7239 .pad_ib = amdgpu_ring_generic_pad_ib, 7240 .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, 7241 .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow, 7242 .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec, 7243 .preempt_ib = gfx_v11_0_ring_preempt_ib, 7244 .emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl, 7245 .emit_wreg = gfx_v11_0_ring_emit_wreg, 7246 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 7247 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 7248 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 7249 .reset = gfx_v11_0_reset_kgq, 7250 .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, 7251 .begin_use = gfx_v11_0_ring_begin_use, 7252 .end_use = gfx_v11_0_ring_end_use, 7253 }; 7254 7255 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { 7256 .type = AMDGPU_RING_TYPE_COMPUTE, 7257 .align_mask = 0xff, 7258 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 7259 .support_64bit_ptrs = true, 7260 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 7261 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 7262 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 7263 .emit_frame_size = 7264 5 + /* update_spm_vmid */ 7265 20 + /* gfx_v11_0_ring_emit_gds_switch */ 7266 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 7267 5 + /* hdp invalidate */ 7268 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 7269 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 7270 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 7271 2 + /* gfx_v11_0_ring_emit_vm_flush */ 7272 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ 7273 8 + /* gfx_v11_0_emit_mem_sync */ 7274 2, /* gfx_v11_0_ring_emit_cleaner_shader */ 7275 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 7276 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 7277 .emit_fence = gfx_v11_0_ring_emit_fence, 7278 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 7279 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 7280 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 7281 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 7282 .test_ring = gfx_v11_0_ring_test_ring, 7283 .test_ib = gfx_v11_0_ring_test_ib, 7284 .insert_nop = gfx_v11_ring_insert_nop, 7285 .pad_ib = amdgpu_ring_generic_pad_ib, 7286 .emit_wreg = gfx_v11_0_ring_emit_wreg, 7287 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 7288 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 7289 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 7290 .reset = gfx_v11_0_reset_kcq, 7291 .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, 7292 .begin_use = gfx_v11_0_ring_begin_use, 7293 .end_use = gfx_v11_0_ring_end_use, 7294 }; 7295 7296 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { 7297 .type = AMDGPU_RING_TYPE_KIQ, 7298 .align_mask = 0xff, 7299 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 7300 .support_64bit_ptrs = true, 7301 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 7302 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 7303 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 7304 .emit_frame_size = 7305 20 + /* gfx_v11_0_ring_emit_gds_switch */ 7306 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 7307 5 + /*hdp invalidate */ 7308 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 7309 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 7310 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 7311 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 7312 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 7313 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 7314 .emit_fence = gfx_v11_0_ring_emit_fence_kiq, 7315 .test_ring = gfx_v11_0_ring_test_ring, 7316 .test_ib = gfx_v11_0_ring_test_ib, 7317 .insert_nop = amdgpu_ring_insert_nop, 7318 .pad_ib = amdgpu_ring_generic_pad_ib, 7319 .emit_rreg = gfx_v11_0_ring_emit_rreg, 7320 .emit_wreg = gfx_v11_0_ring_emit_wreg, 7321 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 7322 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 7323 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 7324 }; 7325 7326 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) 7327 { 7328 int i; 7329 7330 adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq; 7331 7332 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 7333 adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; 7334 7335 for (i = 0; i < adev->gfx.num_compute_rings; i++) 7336 adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute; 7337 } 7338 7339 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = { 7340 .set = gfx_v11_0_set_eop_interrupt_state, 7341 .process = gfx_v11_0_eop_irq, 7342 }; 7343 7344 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = { 7345 .set = gfx_v11_0_set_priv_reg_fault_state, 7346 .process = gfx_v11_0_priv_reg_irq, 7347 }; 7348 7349 static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = { 7350 .set = gfx_v11_0_set_bad_op_fault_state, 7351 .process = gfx_v11_0_bad_op_irq, 7352 }; 7353 7354 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { 7355 .set = gfx_v11_0_set_priv_inst_fault_state, 7356 .process = gfx_v11_0_priv_inst_irq, 7357 }; 7358 7359 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { 7360 .process = gfx_v11_0_rlc_gc_fed_irq, 7361 }; 7362 7363 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) 7364 { 7365 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 7366 adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs; 7367 7368 adev->gfx.priv_reg_irq.num_types = 1; 7369 adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs; 7370 7371 adev->gfx.bad_op_irq.num_types = 1; 7372 adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs; 7373 7374 adev->gfx.priv_inst_irq.num_types = 1; 7375 adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; 7376 7377 adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ 7378 adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; 7379 7380 } 7381 7382 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) 7383 { 7384 if (adev->flags & AMD_IS_APU) 7385 adev->gfx.imu.mode = MISSION_MODE; 7386 else 7387 adev->gfx.imu.mode = DEBUG_MODE; 7388 7389 adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs; 7390 } 7391 7392 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev) 7393 { 7394 adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs; 7395 } 7396 7397 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev) 7398 { 7399 unsigned total_cu = adev->gfx.config.max_cu_per_sh * 7400 adev->gfx.config.max_sh_per_se * 7401 adev->gfx.config.max_shader_engines; 7402 7403 adev->gds.gds_size = 0x1000; 7404 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; 7405 adev->gds.gws_size = 64; 7406 adev->gds.oa_size = 16; 7407 } 7408 7409 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev) 7410 { 7411 /* set gfx eng mqd */ 7412 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 7413 sizeof(struct v11_gfx_mqd); 7414 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 7415 gfx_v11_0_gfx_mqd_init; 7416 /* set compute eng mqd */ 7417 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 7418 sizeof(struct v11_compute_mqd); 7419 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 7420 gfx_v11_0_compute_mqd_init; 7421 } 7422 7423 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 7424 u32 bitmap) 7425 { 7426 u32 data; 7427 7428 if (!bitmap) 7429 return; 7430 7431 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 7432 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 7433 7434 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 7435 } 7436 7437 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 7438 { 7439 u32 data, wgp_bitmask; 7440 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 7441 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 7442 7443 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 7444 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 7445 7446 wgp_bitmask = 7447 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 7448 7449 return (~data) & wgp_bitmask; 7450 } 7451 7452 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 7453 { 7454 u32 wgp_idx, wgp_active_bitmap; 7455 u32 cu_bitmap_per_wgp, cu_active_bitmap; 7456 7457 wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev); 7458 cu_active_bitmap = 0; 7459 7460 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 7461 /* if there is one WGP enabled, it means 2 CUs will be enabled */ 7462 cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 7463 if (wgp_active_bitmap & (1 << wgp_idx)) 7464 cu_active_bitmap |= cu_bitmap_per_wgp; 7465 } 7466 7467 return cu_active_bitmap; 7468 } 7469 7470 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 7471 struct amdgpu_cu_info *cu_info) 7472 { 7473 int i, j, k, counter, active_cu_number = 0; 7474 u32 mask, bitmap; 7475 unsigned disable_masks[8 * 2]; 7476 7477 if (!adev || !cu_info) 7478 return -EINVAL; 7479 7480 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 7481 7482 mutex_lock(&adev->grbm_idx_mutex); 7483 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 7484 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 7485 bitmap = i * adev->gfx.config.max_sh_per_se + j; 7486 if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1)) 7487 continue; 7488 mask = 1; 7489 counter = 0; 7490 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0); 7491 if (i < 8 && j < 2) 7492 gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( 7493 adev, disable_masks[i * 2 + j]); 7494 bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev); 7495 7496 /** 7497 * GFX11 could support more than 4 SEs, while the bitmap 7498 * in cu_info struct is 4x4 and ioctl interface struct 7499 * drm_amdgpu_info_device should keep stable. 7500 * So we use last two columns of bitmap to store cu mask for 7501 * SEs 4 to 7, the layout of the bitmap is as below: 7502 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 7503 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 7504 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 7505 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 7506 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 7507 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 7508 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 7509 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 7510 */ 7511 cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap; 7512 7513 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 7514 if (bitmap & mask) 7515 counter++; 7516 7517 mask <<= 1; 7518 } 7519 active_cu_number += counter; 7520 } 7521 } 7522 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 7523 mutex_unlock(&adev->grbm_idx_mutex); 7524 7525 cu_info->number = active_cu_number; 7526 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 7527 7528 return 0; 7529 } 7530 7531 const struct amdgpu_ip_block_version gfx_v11_0_ip_block = 7532 { 7533 .type = AMD_IP_BLOCK_TYPE_GFX, 7534 .major = 11, 7535 .minor = 0, 7536 .rev = 0, 7537 .funcs = &gfx_v11_0_ip_funcs, 7538 }; 7539