1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/delay.h> 24 #include <linux/kernel.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include "amdgpu.h" 29 #include "amdgpu_gfx.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_smu.h" 32 #include "imu_v11_0.h" 33 #include "soc21.h" 34 #include "nvd.h" 35 36 #include "gc/gc_11_0_0_offset.h" 37 #include "gc/gc_11_0_0_sh_mask.h" 38 #include "smuio/smuio_13_0_6_offset.h" 39 #include "smuio/smuio_13_0_6_sh_mask.h" 40 #include "navi10_enum.h" 41 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 42 43 #include "soc15.h" 44 #include "clearstate_gfx11.h" 45 #include "v11_structs.h" 46 #include "gfx_v11_0.h" 47 #include "gfx_v11_0_cleaner_shader.h" 48 #include "gfx_v11_0_3.h" 49 #include "nbio_v4_3.h" 50 #include "mes_v11_0.h" 51 #include "mes_userqueue.h" 52 #include "amdgpu_userq_fence.h" 53 54 #define GFX11_NUM_GFX_RINGS 1 55 #define GFX11_MEC_HPD_SIZE 2048 56 57 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 58 #define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388 59 60 #define regCGTT_WD_CLK_CTRL 0x5086 61 #define regCGTT_WD_CLK_CTRL_BASE_IDX 1 62 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1 0x4e7e 63 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX 1 64 #define regPC_CONFIG_CNTL_1 0x194d 65 #define regPC_CONFIG_CNTL_1_BASE_IDX 1 66 67 #define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100 68 #define regCP_GFX_HQD_VMID_DEFAULT 0x00000000 69 #define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000 70 #define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01 71 #define regCP_GFX_HQD_CNTL_DEFAULT 0x00a00000 72 #define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000 73 #define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000 74 75 #define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006 76 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 77 #define regCP_MQD_CONTROL_DEFAULT 0x00000100 78 #define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509 79 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 80 #define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000 81 #define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501 82 #define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000 83 84 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); 85 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); 86 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); 87 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); 88 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); 89 
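/*
 * Note: gc_11_0_0_rlc_1.bin is an alternate RLC image for GC 11.0.0 boards;
 * gfx_v11_0_init_microcode() below selects it instead of gc_11_0_0_rlc.bin
 * when the PCI revision is 0xCE.
 */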
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); 90 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); 91 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin"); 92 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin"); 93 MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin"); 94 MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin"); 95 MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin"); 96 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin"); 97 MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin"); 98 MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin"); 99 MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin"); 100 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin"); 101 MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin"); 102 MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin"); 103 MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin"); 104 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin"); 105 MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin"); 106 MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin"); 107 MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin"); 108 MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin"); 109 MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin"); 110 MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin"); 111 MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin"); 112 MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin"); 113 MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin"); 114 MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin"); 115 MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin"); 116 MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin"); 117 MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin"); 118 MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin"); 119 MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin"); 120 MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin"); 121 MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin"); 122 123 static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = { 124 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), 125 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), 126 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3), 127 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), 128 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), 129 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3), 130 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), 131 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), 132 SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), 133 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), 134 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), 135 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2), 136 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2), 137 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), 138 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), 139 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0), 140 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE), 141 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 142 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR), 143 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE), 144 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR), 145 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR), 146 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE), 147 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR), 148 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR), 149 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 150 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ), 151 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 152 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 153 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 154 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO), 155 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI), 156 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ), 157 SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), 158 SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), 159 SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), 160 SOC15_REG_ENTRY_STR(GC, 0, 
regGDS_PROTECTION_FAULT), 161 SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT), 162 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS), 163 SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2), 164 SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS), 165 SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS), 166 SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), 167 SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES), 168 SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS), 169 SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), 170 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL), 171 SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS), 172 SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), 173 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), 174 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL), 175 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR), 176 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR), 177 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR), 178 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR), 179 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR), 180 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), 181 /* cp header registers */ 182 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 183 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 184 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 185 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 186 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 187 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 188 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 189 SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), 190 /* SE status registers */ 191 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), 192 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), 193 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2), 194 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3), 195 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4), 196 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5) 197 }; 198 199 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = { 200 /* compute registers */ 201 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID), 202 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE), 203 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY), 204 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY), 205 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM), 206 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE), 207 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI), 208 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR), 209 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), 210 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), 211 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), 212 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL), 213 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR), 214 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI), 215 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR), 216 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL), 217 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST), 218 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR), 219 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI), 220 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL), 221 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR), 222 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR), 223 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS), 224 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO), 225 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI), 226 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL), 227 SOC15_REG_ENTRY_STR(GC, 0, 
regCP_HQD_CNTL_STACK_OFFSET), 228 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE), 229 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET), 230 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE), 231 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE), 232 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR), 233 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM), 234 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), 235 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), 236 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET), 237 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT), 238 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET), 239 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS), 240 /* cp header registers */ 241 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 242 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 243 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 244 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 245 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 246 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 247 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 248 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 249 }; 250 251 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = { 252 /* gfx queue registers */ 253 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE), 254 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID), 255 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY), 256 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM), 257 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE), 258 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI), 259 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET), 260 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL), 261 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR), 262 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR), 263 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI), 264 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST), 265 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED), 266 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL), 267 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0), 268 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0), 269 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR), 270 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI), 271 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO), 272 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI), 273 SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR), 274 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), 275 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), 276 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), 277 SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), 278 /* cp header registers */ 279 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 280 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 281 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 282 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 283 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 284 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 285 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 286 SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), 287 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 288 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 289 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 290 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 291 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 292 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 293 SOC15_REG_ENTRY_STR(GC, 0, 
regCP_ME_HEADER_DUMP), 294 SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), 295 }; 296 297 static const struct soc15_reg_golden golden_settings_gc_11_0[] = { 298 SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000) 299 }; 300 301 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = 302 { 303 SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010), 304 SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010), 305 SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), 306 SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988), 307 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007), 308 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008), 309 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100), 310 SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), 311 SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) 312 }; 313 314 #define DEFAULT_SH_MEM_CONFIG \ 315 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 316 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ 317 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) 318 319 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev); 320 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev); 321 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev); 322 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev); 323 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev); 324 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev); 325 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev); 326 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 327 struct amdgpu_cu_info *cu_info); 328 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); 329 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 330 u32 sh_num, u32 instance, int xcc_id); 331 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 332 333 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); 334 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 335 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 336 uint32_t val); 337 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); 338 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 339 uint16_t pasid, uint32_t flush_type, 340 bool all_hub, uint8_t dst_sel); 341 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id); 342 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); 343 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 344 bool enable); 345 346 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) 347 { 348 struct amdgpu_device *adev = kiq_ring->adev; 349 u64 shader_mc_addr; 350 351 /* Cleaner shader MC address */ 352 shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; 353 354 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 355 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 356 PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ 357 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 358 amdgpu_ring_write(kiq_ring, 
lower_32_bits(queue_mask)); /* queue mask lo */ 359 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 360 amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ 361 amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ 362 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 363 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 364 } 365 366 static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring, 367 struct amdgpu_ring *ring) 368 { 369 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 370 uint64_t wptr_addr = ring->wptr_gpu_addr; 371 uint32_t me = 0, eng_sel = 0; 372 373 switch (ring->funcs->type) { 374 case AMDGPU_RING_TYPE_COMPUTE: 375 me = 1; 376 eng_sel = 0; 377 break; 378 case AMDGPU_RING_TYPE_GFX: 379 me = 0; 380 eng_sel = 4; 381 break; 382 case AMDGPU_RING_TYPE_MES: 383 me = 2; 384 eng_sel = 5; 385 break; 386 default: 387 WARN_ON(1); 388 } 389 390 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 391 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 392 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 393 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 394 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 395 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 396 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 397 PACKET3_MAP_QUEUES_ME((me)) | 398 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 399 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 400 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 401 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 402 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 403 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 404 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 405 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 406 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 407 } 408 409 static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 410 struct amdgpu_ring *ring, 411 enum amdgpu_unmap_queues_action action, 412 u64 gpu_addr, u64 seq) 413 { 414 struct amdgpu_device *adev = kiq_ring->adev; 415 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 416 417 if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) { 418 amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); 419 return; 420 } 421 422 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 423 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 424 PACKET3_UNMAP_QUEUES_ACTION(action) | 425 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 426 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | 427 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 428 amdgpu_ring_write(kiq_ring, 429 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 430 431 if (action == PREEMPT_QUEUES_NO_UNMAP) { 432 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); 433 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); 434 amdgpu_ring_write(kiq_ring, seq); 435 } else { 436 amdgpu_ring_write(kiq_ring, 0); 437 amdgpu_ring_write(kiq_ring, 0); 438 amdgpu_ring_write(kiq_ring, 0); 439 } 440 } 441 442 static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring, 443 struct amdgpu_ring *ring, 444 u64 addr, 445 u64 seq) 446 { 447 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
4 : 0; 448 449 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); 450 amdgpu_ring_write(kiq_ring, 451 PACKET3_QUERY_STATUS_CONTEXT_ID(0) | 452 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | 453 PACKET3_QUERY_STATUS_COMMAND(2)); 454 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 455 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | 456 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); 457 amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); 458 amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); 459 amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); 460 amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); 461 } 462 463 static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, 464 uint16_t pasid, uint32_t flush_type, 465 bool all_hub) 466 { 467 gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1); 468 } 469 470 static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = { 471 .kiq_set_resources = gfx11_kiq_set_resources, 472 .kiq_map_queues = gfx11_kiq_map_queues, 473 .kiq_unmap_queues = gfx11_kiq_unmap_queues, 474 .kiq_query_status = gfx11_kiq_query_status, 475 .kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs, 476 .set_resources_size = 8, 477 .map_queues_size = 7, 478 .unmap_queues_size = 6, 479 .query_status_size = 7, 480 .invalidate_tlbs_size = 2, 481 }; 482 483 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) 484 { 485 adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs; 486 } 487 488 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) 489 { 490 if (amdgpu_sriov_vf(adev)) 491 return; 492 493 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 494 case IP_VERSION(11, 0, 1): 495 case IP_VERSION(11, 0, 4): 496 soc15_program_register_sequence(adev, 497 golden_settings_gc_11_0_1, 498 (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); 499 break; 500 default: 501 break; 502 } 503 soc15_program_register_sequence(adev, 504 golden_settings_gc_11_0, 505 (const u32)ARRAY_SIZE(golden_settings_gc_11_0)); 506 507 } 508 509 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, 510 bool wc, uint32_t reg, uint32_t val) 511 { 512 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 513 amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | 514 WRITE_DATA_DST_SEL(0) | (wc ? 
WR_CONFIRM : 0)); 515 amdgpu_ring_write(ring, reg); 516 amdgpu_ring_write(ring, 0); 517 amdgpu_ring_write(ring, val); 518 } 519 520 static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 521 int mem_space, int opt, uint32_t addr0, 522 uint32_t addr1, uint32_t ref, uint32_t mask, 523 uint32_t inv) 524 { 525 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 526 amdgpu_ring_write(ring, 527 /* memory (1) or register (0) */ 528 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 529 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 530 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 531 WAIT_REG_MEM_ENGINE(eng_sel))); 532 533 if (mem_space) 534 BUG_ON(addr0 & 0x3); /* Dword align */ 535 amdgpu_ring_write(ring, addr0); 536 amdgpu_ring_write(ring, addr1); 537 amdgpu_ring_write(ring, ref); 538 amdgpu_ring_write(ring, mask); 539 amdgpu_ring_write(ring, inv); /* poll interval */ 540 } 541 542 static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) 543 { 544 /* Header itself is a NOP packet */ 545 if (num_nop == 1) { 546 amdgpu_ring_write(ring, ring->funcs->nop); 547 return; 548 } 549 550 /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ 551 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); 552 553 /* Header is at index 0, followed by num_nops - 1 NOP packet's */ 554 amdgpu_ring_insert_nop(ring, num_nop - 1); 555 } 556 557 static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) 558 { 559 struct amdgpu_device *adev = ring->adev; 560 uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 561 uint32_t tmp = 0; 562 unsigned i; 563 int r; 564 565 WREG32(scratch, 0xCAFEDEAD); 566 r = amdgpu_ring_alloc(ring, 5); 567 if (r) { 568 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 569 ring->idx, r); 570 return r; 571 } 572 573 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 574 gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); 575 } else { 576 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 577 amdgpu_ring_write(ring, scratch - 578 PACKET3_SET_UCONFIG_REG_START); 579 amdgpu_ring_write(ring, 0xDEADBEEF); 580 } 581 amdgpu_ring_commit(ring); 582 583 for (i = 0; i < adev->usec_timeout; i++) { 584 tmp = RREG32(scratch); 585 if (tmp == 0xDEADBEEF) 586 break; 587 if (amdgpu_emu_mode == 1) 588 msleep(1); 589 else 590 udelay(1); 591 } 592 593 if (i >= adev->usec_timeout) 594 r = -ETIMEDOUT; 595 return r; 596 } 597 598 static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 599 { 600 struct amdgpu_device *adev = ring->adev; 601 struct amdgpu_ib ib; 602 struct dma_fence *f = NULL; 603 unsigned index; 604 uint64_t gpu_addr; 605 volatile uint32_t *cpu_ptr; 606 long r; 607 608 /* MES KIQ fw hasn't indirect buffer support for now */ 609 if (adev->enable_mes_kiq && 610 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 611 return 0; 612 613 memset(&ib, 0, sizeof(ib)); 614 615 r = amdgpu_device_wb_get(adev, &index); 616 if (r) 617 return r; 618 619 gpu_addr = adev->wb.gpu_addr + (index * 4); 620 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 621 cpu_ptr = &adev->wb.wb[index]; 622 623 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); 624 if (r) { 625 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 626 goto err1; 627 } 628 629 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 630 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 631 ib.ptr[2] = lower_32_bits(gpu_addr); 632 ib.ptr[3] = upper_32_bits(gpu_addr); 633 ib.ptr[4] = 0xDEADBEEF; 634 ib.length_dw = 5; 635 636 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, 
&f); 637 if (r) 638 goto err2; 639 640 r = dma_fence_wait_timeout(f, false, timeout); 641 if (r == 0) { 642 r = -ETIMEDOUT; 643 goto err2; 644 } else if (r < 0) { 645 goto err2; 646 } 647 648 if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) 649 r = 0; 650 else 651 r = -EINVAL; 652 err2: 653 amdgpu_ib_free(&ib, NULL); 654 dma_fence_put(f); 655 err1: 656 amdgpu_device_wb_free(adev, index); 657 return r; 658 } 659 660 static void gfx_v11_0_free_microcode(struct amdgpu_device *adev) 661 { 662 amdgpu_ucode_release(&adev->gfx.pfp_fw); 663 amdgpu_ucode_release(&adev->gfx.me_fw); 664 amdgpu_ucode_release(&adev->gfx.rlc_fw); 665 amdgpu_ucode_release(&adev->gfx.mec_fw); 666 667 kfree(adev->gfx.rlc.register_list_format); 668 } 669 670 static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix) 671 { 672 const struct psp_firmware_header_v1_0 *toc_hdr; 673 int err = 0; 674 675 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, 676 AMDGPU_UCODE_REQUIRED, 677 "amdgpu/%s_toc.bin", ucode_prefix); 678 if (err) 679 goto out; 680 681 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 682 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 683 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 684 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 685 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 686 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 687 return 0; 688 out: 689 amdgpu_ucode_release(&adev->psp.toc_fw); 690 return err; 691 } 692 693 static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) 694 { 695 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 696 case IP_VERSION(11, 0, 0): 697 case IP_VERSION(11, 0, 2): 698 case IP_VERSION(11, 0, 3): 699 if ((adev->gfx.me_fw_version >= 1505) && 700 (adev->gfx.pfp_fw_version >= 1600) && 701 (adev->gfx.mec_fw_version >= 512)) { 702 if (amdgpu_sriov_vf(adev)) 703 adev->gfx.cp_gfx_shadow = true; 704 else 705 adev->gfx.cp_gfx_shadow = false; 706 } 707 break; 708 default: 709 adev->gfx.cp_gfx_shadow = false; 710 break; 711 } 712 } 713 714 static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) 715 { 716 char ucode_prefix[25]; 717 int err; 718 const struct rlc_firmware_header_v2_0 *rlc_hdr; 719 uint16_t version_major; 720 uint16_t version_minor; 721 722 DRM_DEBUG("\n"); 723 724 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 725 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, 726 AMDGPU_UCODE_REQUIRED, 727 "amdgpu/%s_pfp.bin", ucode_prefix); 728 if (err) 729 goto out; 730 /* check pfp fw hdr version to decide if enable rs64 for gfx11.*/ 731 adev->gfx.rs64_enable = amdgpu_ucode_hdr_version( 732 (union amdgpu_firmware_header *) 733 adev->gfx.pfp_fw->data, 2, 0); 734 if (adev->gfx.rs64_enable) { 735 dev_info(adev->dev, "CP RS64 enable\n"); 736 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP); 737 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK); 738 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK); 739 } else { 740 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP); 741 } 742 743 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, 744 AMDGPU_UCODE_REQUIRED, 745 "amdgpu/%s_me.bin", ucode_prefix); 746 if (err) 747 goto out; 748 if (adev->gfx.rs64_enable) { 749 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME); 750 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); 751 
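/* the RS64 ME stack (data) image is registered per pipe; pipe 1 follows */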
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK); 752 } else { 753 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME); 754 } 755 756 if (!amdgpu_sriov_vf(adev)) { 757 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) && 758 adev->pdev->revision == 0xCE) 759 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 760 AMDGPU_UCODE_REQUIRED, 761 "amdgpu/gc_11_0_0_rlc_1.bin"); 762 else 763 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 764 AMDGPU_UCODE_REQUIRED, 765 "amdgpu/%s_rlc.bin", ucode_prefix); 766 if (err) 767 goto out; 768 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 769 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 770 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 771 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); 772 if (err) 773 goto out; 774 } 775 776 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 777 AMDGPU_UCODE_REQUIRED, 778 "amdgpu/%s_mec.bin", ucode_prefix); 779 if (err) 780 goto out; 781 if (adev->gfx.rs64_enable) { 782 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC); 783 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK); 784 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK); 785 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK); 786 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK); 787 } else { 788 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); 789 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); 790 } 791 792 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 793 err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix); 794 795 /* only one MEC for gfx 11.0.0. 
*/ 796 adev->gfx.mec2_fw = NULL; 797 798 gfx_v11_0_check_fw_cp_gfx_shadow(adev); 799 800 if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) { 801 err = adev->gfx.imu.funcs->init_microcode(adev); 802 if (err) 803 DRM_ERROR("Failed to init imu firmware!\n"); 804 return err; 805 } 806 807 out: 808 if (err) { 809 amdgpu_ucode_release(&adev->gfx.pfp_fw); 810 amdgpu_ucode_release(&adev->gfx.me_fw); 811 amdgpu_ucode_release(&adev->gfx.rlc_fw); 812 amdgpu_ucode_release(&adev->gfx.mec_fw); 813 } 814 815 return err; 816 } 817 818 static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) 819 { 820 u32 count = 0; 821 const struct cs_section_def *sect = NULL; 822 const struct cs_extent_def *ext = NULL; 823 824 /* begin clear state */ 825 count += 2; 826 /* context control state */ 827 count += 3; 828 829 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 830 for (ext = sect->section; ext->extent != NULL; ++ext) { 831 if (sect->id == SECT_CONTEXT) 832 count += 2 + ext->reg_count; 833 else 834 return 0; 835 } 836 } 837 838 /* set PA_SC_TILE_STEERING_OVERRIDE */ 839 count += 3; 840 /* end clear state */ 841 count += 2; 842 /* clear state */ 843 count += 2; 844 845 return count; 846 } 847 848 static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, 849 volatile u32 *buffer) 850 { 851 u32 count = 0; 852 int ctx_reg_offset; 853 854 if (adev->gfx.rlc.cs_data == NULL) 855 return; 856 if (buffer == NULL) 857 return; 858 859 count = amdgpu_gfx_csb_preamble_start(buffer); 860 count = amdgpu_gfx_csb_data_parser(adev, buffer, count); 861 862 ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 863 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 864 buffer[count++] = cpu_to_le32(ctx_reg_offset); 865 buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override); 866 867 amdgpu_gfx_csb_preamble_end(buffer, count); 868 } 869 870 static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev) 871 { 872 /* clear state block */ 873 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 874 &adev->gfx.rlc.clear_state_gpu_addr, 875 (void **)&adev->gfx.rlc.cs_ptr); 876 877 /* jump table block */ 878 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 879 &adev->gfx.rlc.cp_table_gpu_addr, 880 (void **)&adev->gfx.rlc.cp_table_ptr); 881 } 882 883 static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 884 { 885 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 886 887 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; 888 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 889 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); 890 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); 891 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3); 892 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL); 893 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX); 894 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0); 895 adev->gfx.rlc.rlcg_reg_access_supported = true; 896 } 897 898 static int gfx_v11_0_rlc_init(struct amdgpu_device *adev) 899 { 900 const struct cs_section_def *cs_data; 901 int r; 902 903 adev->gfx.rlc.cs_data = gfx11_cs_data; 904 905 cs_data = adev->gfx.rlc.cs_data; 906 907 if (cs_data) { 908 /* init clear state block */ 909 r = amdgpu_gfx_rlc_init_csb(adev); 910 if (r) 911 return r; 912 } 913 914 /* init spm vmid with 0xf */ 915 if 
(adev->gfx.rlc.funcs->update_spm_vmid) 916 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); 917 918 return 0; 919 } 920 921 static void gfx_v11_0_mec_fini(struct amdgpu_device *adev) 922 { 923 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 924 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 925 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); 926 } 927 928 static void gfx_v11_0_me_init(struct amdgpu_device *adev) 929 { 930 bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); 931 932 amdgpu_gfx_graphics_queue_acquire(adev); 933 } 934 935 static int gfx_v11_0_mec_init(struct amdgpu_device *adev) 936 { 937 int r; 938 u32 *hpd; 939 size_t mec_hpd_size; 940 941 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 942 943 /* take ownership of the relevant compute queues */ 944 amdgpu_gfx_compute_queue_acquire(adev); 945 mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE; 946 947 if (mec_hpd_size) { 948 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 949 AMDGPU_GEM_DOMAIN_GTT, 950 &adev->gfx.mec.hpd_eop_obj, 951 &adev->gfx.mec.hpd_eop_gpu_addr, 952 (void **)&hpd); 953 if (r) { 954 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 955 gfx_v11_0_mec_fini(adev); 956 return r; 957 } 958 959 memset(hpd, 0, mec_hpd_size); 960 961 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 962 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 963 } 964 965 return 0; 966 } 967 968 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address) 969 { 970 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 971 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 972 (address << SQ_IND_INDEX__INDEX__SHIFT)); 973 return RREG32_SOC15(GC, 0, regSQ_IND_DATA); 974 } 975 976 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, 977 uint32_t thread, uint32_t regno, 978 uint32_t num, uint32_t *out) 979 { 980 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 981 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 982 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 983 (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | 984 (SQ_IND_INDEX__AUTO_INCR_MASK)); 985 while (num--) 986 *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); 987 } 988 989 static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) 990 { 991 /* in gfx11 the SIMD_ID is specified as part of the INSTANCE 992 * field when performing a select_se_sh so it should be 993 * zero here */ 994 WARN_ON(simd != 0); 995 996 /* type 3 wave data */ 997 dst[(*no_fields)++] = 3; 998 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS); 999 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO); 1000 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI); 1001 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO); 1002 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI); 1003 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1); 1004 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2); 1005 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC); 1006 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC); 1007 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS); 1008 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS); 1009 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); 1010 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); 
1011 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); 1012 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); 1013 } 1014 1015 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 1016 uint32_t wave, uint32_t start, 1017 uint32_t size, uint32_t *dst) 1018 { 1019 WARN_ON(simd != 0); 1020 1021 wave_read_regs( 1022 adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, 1023 dst); 1024 } 1025 1026 static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 1027 uint32_t wave, uint32_t thread, 1028 uint32_t start, uint32_t size, 1029 uint32_t *dst) 1030 { 1031 wave_read_regs( 1032 adev, wave, thread, 1033 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 1034 } 1035 1036 static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, 1037 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 1038 { 1039 soc21_grbm_select(adev, me, pipe, q, vm); 1040 } 1041 1042 /* all sizes are in bytes */ 1043 #define MQD_SHADOW_BASE_SIZE 73728 1044 #define MQD_SHADOW_BASE_ALIGNMENT 256 1045 #define MQD_FWWORKAREA_SIZE 484 1046 #define MQD_FWWORKAREA_ALIGNMENT 256 1047 1048 static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev, 1049 struct amdgpu_gfx_shadow_info *shadow_info) 1050 { 1051 shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE; 1052 shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT; 1053 shadow_info->csa_size = MQD_FWWORKAREA_SIZE; 1054 shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT; 1055 } 1056 1057 static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev, 1058 struct amdgpu_gfx_shadow_info *shadow_info, 1059 bool skip_check) 1060 { 1061 if (adev->gfx.cp_gfx_shadow || skip_check) { 1062 gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info); 1063 return 0; 1064 } else { 1065 memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info)); 1066 return -ENOTSUPP; 1067 } 1068 } 1069 1070 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { 1071 .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter, 1072 .select_se_sh = &gfx_v11_0_select_se_sh, 1073 .read_wave_data = &gfx_v11_0_read_wave_data, 1074 .read_wave_sgprs = &gfx_v11_0_read_wave_sgprs, 1075 .read_wave_vgprs = &gfx_v11_0_read_wave_vgprs, 1076 .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q, 1077 .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk, 1078 .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info, 1079 }; 1080 1081 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) 1082 { 1083 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1084 case IP_VERSION(11, 0, 0): 1085 case IP_VERSION(11, 0, 2): 1086 adev->gfx.config.max_hw_contexts = 8; 1087 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1088 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1089 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 1090 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1091 break; 1092 case IP_VERSION(11, 0, 3): 1093 adev->gfx.ras = &gfx_v11_0_3_ras; 1094 adev->gfx.config.max_hw_contexts = 8; 1095 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1096 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1097 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 1098 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1099 break; 1100 case IP_VERSION(11, 0, 1): 1101 case IP_VERSION(11, 0, 4): 1102 case IP_VERSION(11, 5, 0): 1103 case IP_VERSION(11, 5, 1): 1104 case IP_VERSION(11, 5, 2): 1105 case IP_VERSION(11, 5, 3): 1106 adev->gfx.config.max_hw_contexts = 8; 1107 adev->gfx.config.sc_prim_fifo_size_frontend 
= 0x20; 1108 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1109 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80; 1110 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300; 1111 break; 1112 default: 1113 BUG(); 1114 break; 1115 } 1116 1117 return 0; 1118 } 1119 1120 static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, 1121 int me, int pipe, int queue) 1122 { 1123 struct amdgpu_ring *ring; 1124 unsigned int irq_type; 1125 unsigned int hw_prio; 1126 1127 ring = &adev->gfx.gfx_ring[ring_id]; 1128 1129 ring->me = me; 1130 ring->pipe = pipe; 1131 ring->queue = queue; 1132 1133 ring->ring_obj = NULL; 1134 ring->use_doorbell = true; 1135 if (adev->gfx.disable_kq) { 1136 ring->no_scheduler = true; 1137 ring->no_user_submission = true; 1138 } 1139 1140 if (!ring_id) 1141 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; 1142 else 1143 ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; 1144 ring->vm_hub = AMDGPU_GFXHUB(0); 1145 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1146 1147 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; 1148 hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ? 1149 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 1150 return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1151 hw_prio, NULL); 1152 } 1153 1154 static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 1155 int mec, int pipe, int queue) 1156 { 1157 int r; 1158 unsigned irq_type; 1159 struct amdgpu_ring *ring; 1160 unsigned int hw_prio; 1161 1162 ring = &adev->gfx.compute_ring[ring_id]; 1163 1164 /* mec0 is me1 */ 1165 ring->me = mec + 1; 1166 ring->pipe = pipe; 1167 ring->queue = queue; 1168 1169 ring->ring_obj = NULL; 1170 ring->use_doorbell = true; 1171 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; 1172 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 1173 + (ring_id * GFX11_MEC_HPD_SIZE); 1174 ring->vm_hub = AMDGPU_GFXHUB(0); 1175 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1176 1177 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 1178 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 1179 + ring->pipe; 1180 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 
1181 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 1182 /* type-2 packets are deprecated on MEC, use type-3 instead */ 1183 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1184 hw_prio, NULL); 1185 if (r) 1186 return r; 1187 1188 return 0; 1189 } 1190 1191 static struct { 1192 SOC21_FIRMWARE_ID id; 1193 unsigned int offset; 1194 unsigned int size; 1195 } rlc_autoload_info[SOC21_FIRMWARE_ID_MAX]; 1196 1197 static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) 1198 { 1199 RLC_TABLE_OF_CONTENT *ucode = rlc_toc; 1200 1201 while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) && 1202 (ucode->id < SOC21_FIRMWARE_ID_MAX)) { 1203 rlc_autoload_info[ucode->id].id = ucode->id; 1204 rlc_autoload_info[ucode->id].offset = ucode->offset * 4; 1205 rlc_autoload_info[ucode->id].size = ucode->size * 4; 1206 1207 ucode++; 1208 } 1209 } 1210 1211 static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev) 1212 { 1213 uint32_t total_size = 0; 1214 SOC21_FIRMWARE_ID id; 1215 1216 gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr); 1217 1218 for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++) 1219 total_size += rlc_autoload_info[id].size; 1220 1221 /* In case the offset in rlc toc ucode is aligned */ 1222 if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset) 1223 total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset + 1224 rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size; 1225 1226 return total_size; 1227 } 1228 1229 static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) 1230 { 1231 int r; 1232 uint32_t total_size; 1233 1234 total_size = gfx_v11_0_calc_toc_total_size(adev); 1235 1236 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 1237 AMDGPU_GEM_DOMAIN_VRAM | 1238 AMDGPU_GEM_DOMAIN_GTT, 1239 &adev->gfx.rlc.rlc_autoload_bo, 1240 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1241 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1242 1243 if (r) { 1244 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 1245 return r; 1246 } 1247 1248 return 0; 1249 } 1250 1251 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 1252 SOC21_FIRMWARE_ID id, 1253 const void *fw_data, 1254 uint32_t fw_size, 1255 uint32_t *fw_autoload_mask) 1256 { 1257 uint32_t toc_offset; 1258 uint32_t toc_fw_size; 1259 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 1260 1261 if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX) 1262 return; 1263 1264 toc_offset = rlc_autoload_info[id].offset; 1265 toc_fw_size = rlc_autoload_info[id].size; 1266 1267 if (fw_size == 0) 1268 fw_size = toc_fw_size; 1269 1270 if (fw_size > toc_fw_size) 1271 fw_size = toc_fw_size; 1272 1273 memcpy(ptr + toc_offset, fw_data, fw_size); 1274 1275 if (fw_size < toc_fw_size) 1276 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); 1277 1278 if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) 1279 *(uint64_t *)fw_autoload_mask |= 1ULL << id; 1280 } 1281 1282 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, 1283 uint32_t *fw_autoload_mask) 1284 { 1285 void *data; 1286 uint32_t size; 1287 uint64_t *toc_ptr; 1288 1289 *(uint64_t *)fw_autoload_mask |= 0x1; 1290 1291 DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask); 1292 1293 data = adev->psp.toc.start_addr; 1294 size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size; 1295 1296 toc_ptr = (uint64_t *)data + size / 8 - 1; 1297 *toc_ptr = 
*(uint64_t *)fw_autoload_mask; 1298 1299 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC, 1300 data, size, fw_autoload_mask); 1301 } 1302 1303 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev, 1304 uint32_t *fw_autoload_mask) 1305 { 1306 const __le32 *fw_data; 1307 uint32_t fw_size; 1308 const struct gfx_firmware_header_v1_0 *cp_hdr; 1309 const struct gfx_firmware_header_v2_0 *cpv2_hdr; 1310 const struct rlc_firmware_header_v2_0 *rlc_hdr; 1311 const struct rlc_firmware_header_v2_2 *rlcv22_hdr; 1312 uint16_t version_major, version_minor; 1313 1314 if (adev->gfx.rs64_enable) { 1315 /* pfp ucode */ 1316 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1317 adev->gfx.pfp_fw->data; 1318 /* instruction */ 1319 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1320 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1321 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1322 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP, 1323 fw_data, fw_size, fw_autoload_mask); 1324 /* data */ 1325 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1326 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1327 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1328 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK, 1329 fw_data, fw_size, fw_autoload_mask); 1330 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK, 1331 fw_data, fw_size, fw_autoload_mask); 1332 /* me ucode */ 1333 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1334 adev->gfx.me_fw->data; 1335 /* instruction */ 1336 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1337 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1338 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1339 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME, 1340 fw_data, fw_size, fw_autoload_mask); 1341 /* data */ 1342 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1343 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1344 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1345 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK, 1346 fw_data, fw_size, fw_autoload_mask); 1347 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK, 1348 fw_data, fw_size, fw_autoload_mask); 1349 /* mec ucode */ 1350 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1351 adev->gfx.mec_fw->data; 1352 /* instruction */ 1353 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1354 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1355 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1356 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC, 1357 fw_data, fw_size, fw_autoload_mask); 1358 /* data */ 1359 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1360 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1361 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1362 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK, 1363 fw_data, fw_size, fw_autoload_mask); 1364 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK, 1365 fw_data, fw_size, fw_autoload_mask); 1366 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK, 1367 fw_data, fw_size, fw_autoload_mask); 1368 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK, 1369 fw_data, fw_size, fw_autoload_mask); 1370 } else { 1371 /* pfp ucode */ 1372 cp_hdr = (const struct 
gfx_firmware_header_v1_0 *) 1373 adev->gfx.pfp_fw->data; 1374 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1375 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1376 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1377 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP, 1378 fw_data, fw_size, fw_autoload_mask); 1379 1380 /* me ucode */ 1381 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1382 adev->gfx.me_fw->data; 1383 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1384 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1385 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1386 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME, 1387 fw_data, fw_size, fw_autoload_mask); 1388 1389 /* mec ucode */ 1390 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1391 adev->gfx.mec_fw->data; 1392 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1393 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1394 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) - 1395 cp_hdr->jt_size * 4; 1396 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC, 1397 fw_data, fw_size, fw_autoload_mask); 1398 } 1399 1400 /* rlc ucode */ 1401 rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 1402 adev->gfx.rlc_fw->data; 1403 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1404 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1405 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1406 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE, 1407 fw_data, fw_size, fw_autoload_mask); 1408 1409 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 1410 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 1411 if (version_major == 2) { 1412 if (version_minor >= 2) { 1413 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1414 1415 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1416 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); 1417 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); 1418 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE, 1419 fw_data, fw_size, fw_autoload_mask); 1420 1421 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1422 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); 1423 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); 1424 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT, 1425 fw_data, fw_size, fw_autoload_mask); 1426 } 1427 } 1428 } 1429 1430 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev, 1431 uint32_t *fw_autoload_mask) 1432 { 1433 const __le32 *fw_data; 1434 uint32_t fw_size; 1435 const struct sdma_firmware_header_v2_0 *sdma_hdr; 1436 1437 sdma_hdr = (const struct sdma_firmware_header_v2_0 *) 1438 adev->sdma.instance[0].fw->data; 1439 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1440 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes)); 1441 fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes); 1442 1443 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1444 SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask); 1445 1446 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1447 le32_to_cpu(sdma_hdr->ctl_ucode_offset)); 1448 fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes); 1449 1450 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1451 SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, 
fw_autoload_mask); 1452 } 1453 1454 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev, 1455 uint32_t *fw_autoload_mask) 1456 { 1457 const __le32 *fw_data; 1458 unsigned fw_size; 1459 const struct mes_firmware_header_v1_0 *mes_hdr; 1460 int pipe, ucode_id, data_id; 1461 1462 for (pipe = 0; pipe < 2; pipe++) { 1463 if (pipe==0) { 1464 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0; 1465 data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK; 1466 } else { 1467 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1; 1468 data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK; 1469 } 1470 1471 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1472 adev->mes.fw[pipe]->data; 1473 1474 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1475 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1476 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1477 1478 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1479 ucode_id, fw_data, fw_size, fw_autoload_mask); 1480 1481 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1482 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1483 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1484 1485 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1486 data_id, fw_data, fw_size, fw_autoload_mask); 1487 } 1488 } 1489 1490 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1491 { 1492 uint32_t rlc_g_offset, rlc_g_size; 1493 uint64_t gpu_addr; 1494 uint32_t autoload_fw_id[2]; 1495 1496 memset(autoload_fw_id, 0, sizeof(uint32_t) * 2); 1497 1498 /* RLC autoload sequence 2: copy ucode */ 1499 gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id); 1500 gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id); 1501 gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id); 1502 gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id); 1503 1504 rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset; 1505 rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size; 1506 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset; 1507 1508 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); 1509 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); 1510 1511 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); 1512 1513 /* RLC autoload sequence 3: load IMU fw */ 1514 if (adev->gfx.imu.funcs->load_microcode) 1515 adev->gfx.imu.funcs->load_microcode(adev); 1516 /* RLC autoload sequence 4 init IMU fw */ 1517 if (adev->gfx.imu.funcs->setup_imu) 1518 adev->gfx.imu.funcs->setup_imu(adev); 1519 if (adev->gfx.imu.funcs->start_imu) 1520 adev->gfx.imu.funcs->start_imu(adev); 1521 1522 /* RLC autoload sequence 5 disable gpa mode */ 1523 gfx_v11_0_disable_gpa_mode(adev); 1524 1525 return 0; 1526 } 1527 1528 static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) 1529 { 1530 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0); 1531 uint32_t *ptr; 1532 uint32_t inst; 1533 1534 ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL); 1535 if (!ptr) { 1536 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); 1537 adev->gfx.ip_dump_core = NULL; 1538 } else { 1539 adev->gfx.ip_dump_core = ptr; 1540 } 1541 1542 /* Allocate memory for compute queue registers for all the instances */ 1543 reg_count = ARRAY_SIZE(gc_cp_reg_list_11); 1544 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 1545 adev->gfx.mec.num_queue_per_pipe; 1546 1547 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), 
GFP_KERNEL); 1548 if (!ptr) { 1549 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); 1550 adev->gfx.ip_dump_compute_queues = NULL; 1551 } else { 1552 adev->gfx.ip_dump_compute_queues = ptr; 1553 } 1554 1555 /* Allocate memory for gfx queue registers for all the instances */ 1556 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 1557 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me * 1558 adev->gfx.me.num_queue_per_pipe; 1559 1560 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL); 1561 if (!ptr) { 1562 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n"); 1563 adev->gfx.ip_dump_gfx_queues = NULL; 1564 } else { 1565 adev->gfx.ip_dump_gfx_queues = ptr; 1566 } 1567 } 1568 1569 static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) 1570 { 1571 int i, j, k, r, ring_id; 1572 int xcc_id = 0; 1573 struct amdgpu_device *adev = ip_block->adev; 1574 int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */ 1575 1576 INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler); 1577 1578 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1579 case IP_VERSION(11, 0, 0): 1580 case IP_VERSION(11, 0, 1): 1581 case IP_VERSION(11, 0, 2): 1582 case IP_VERSION(11, 0, 3): 1583 case IP_VERSION(11, 0, 4): 1584 case IP_VERSION(11, 5, 0): 1585 case IP_VERSION(11, 5, 1): 1586 case IP_VERSION(11, 5, 2): 1587 case IP_VERSION(11, 5, 3): 1588 adev->gfx.me.num_me = 1; 1589 adev->gfx.me.num_pipe_per_me = 1; 1590 adev->gfx.me.num_queue_per_pipe = 2; 1591 adev->gfx.mec.num_mec = 1; 1592 adev->gfx.mec.num_pipe_per_mec = 4; 1593 adev->gfx.mec.num_queue_per_pipe = 4; 1594 break; 1595 default: 1596 adev->gfx.me.num_me = 1; 1597 adev->gfx.me.num_pipe_per_me = 1; 1598 adev->gfx.me.num_queue_per_pipe = 1; 1599 adev->gfx.mec.num_mec = 1; 1600 adev->gfx.mec.num_pipe_per_mec = 4; 1601 adev->gfx.mec.num_queue_per_pipe = 8; 1602 break; 1603 } 1604 1605 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1606 case IP_VERSION(11, 0, 0): 1607 case IP_VERSION(11, 0, 2): 1608 case IP_VERSION(11, 0, 3): 1609 #ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ 1610 if (!adev->gfx.disable_uq && 1611 adev->gfx.me_fw_version >= 2390 && 1612 adev->gfx.pfp_fw_version >= 2530 && 1613 adev->gfx.mec_fw_version >= 2600 && 1614 adev->mes.fw_version[0] >= 120) { 1615 adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; 1616 adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; 1617 } 1618 #endif 1619 break; 1620 case IP_VERSION(11, 0, 1): 1621 case IP_VERSION(11, 0, 4): 1622 case IP_VERSION(11, 5, 0): 1623 case IP_VERSION(11, 5, 1): 1624 case IP_VERSION(11, 5, 2): 1625 case IP_VERSION(11, 5, 3): 1626 #ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ 1627 /* add firmware version checks here */ 1628 if (0 && !adev->gfx.disable_uq) { 1629 adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; 1630 adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; 1631 } 1632 #endif 1633 break; 1634 default: 1635 break; 1636 } 1637 1638 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1639 case IP_VERSION(11, 0, 0): 1640 case IP_VERSION(11, 0, 2): 1641 case IP_VERSION(11, 0, 3): 1642 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1643 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1644 if (adev->gfx.me_fw_version >= 2280 && 1645 adev->gfx.pfp_fw_version >= 2370 && 1646 adev->gfx.mec_fw_version >= 2450 && 1647 adev->mes.fw_version[0] >= 99) { 1648 adev->gfx.enable_cleaner_shader = true; 1649 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1650 
if (r) { 1651 adev->gfx.enable_cleaner_shader = false; 1652 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1653 } 1654 } 1655 break; 1656 case IP_VERSION(11, 5, 0): 1657 case IP_VERSION(11, 5, 1): 1658 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1659 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1660 if (adev->gfx.mec_fw_version >= 26 && 1661 adev->mes.fw_version[0] >= 114) { 1662 adev->gfx.enable_cleaner_shader = true; 1663 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1664 if (r) { 1665 adev->gfx.enable_cleaner_shader = false; 1666 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1667 } 1668 } 1669 break; 1670 case IP_VERSION(11, 5, 2): 1671 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1672 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1673 if (adev->gfx.me_fw_version >= 12 && 1674 adev->gfx.pfp_fw_version >= 15 && 1675 adev->gfx.mec_fw_version >= 15) { 1676 adev->gfx.enable_cleaner_shader = true; 1677 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1678 if (r) { 1679 adev->gfx.enable_cleaner_shader = false; 1680 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1681 } 1682 } 1683 break; 1684 case IP_VERSION(11, 5, 3): 1685 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1686 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1687 if (adev->gfx.me_fw_version >= 7 && 1688 adev->gfx.pfp_fw_version >= 8 && 1689 adev->gfx.mec_fw_version >= 8) { 1690 adev->gfx.enable_cleaner_shader = true; 1691 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1692 if (r) { 1693 adev->gfx.enable_cleaner_shader = false; 1694 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1695 } 1696 } 1697 break; 1698 default: 1699 adev->gfx.enable_cleaner_shader = false; 1700 break; 1701 } 1702 1703 /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ 1704 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && 1705 amdgpu_sriov_is_pp_one_vf(adev)) 1706 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG; 1707 1708 /* EOP Event */ 1709 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1710 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1711 &adev->gfx.eop_irq); 1712 if (r) 1713 return r; 1714 1715 /* Bad opcode Event */ 1716 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1717 GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, 1718 &adev->gfx.bad_op_irq); 1719 if (r) 1720 return r; 1721 1722 /* Privileged reg */ 1723 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1724 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1725 &adev->gfx.priv_reg_irq); 1726 if (r) 1727 return r; 1728 1729 /* Privileged inst */ 1730 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1731 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1732 &adev->gfx.priv_inst_irq); 1733 if (r) 1734 return r; 1735 1736 /* FED error */ 1737 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, 1738 GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT, 1739 &adev->gfx.rlc_gc_fed_irq); 1740 if (r) 1741 return r; 1742 1743 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1744 1745 gfx_v11_0_me_init(adev); 1746 1747 r = gfx_v11_0_rlc_init(adev); 1748 if (r) { 1749 DRM_ERROR("Failed to init rlc BOs!\n"); 1750 return r; 1751 } 1752 1753 r = gfx_v11_0_mec_init(adev); 1754 if (r) { 1755 DRM_ERROR("Failed to init MEC BOs!\n"); 1756 return r; 1757 } 1758 1759 if (adev->gfx.num_gfx_rings) { 1760 ring_id = 0; 1761 /* set up the gfx 
ring */ 1762 for (i = 0; i < adev->gfx.me.num_me; i++) { 1763 for (j = 0; j < num_queue_per_pipe; j++) { 1764 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1765 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1766 continue; 1767 1768 r = gfx_v11_0_gfx_ring_init(adev, ring_id, 1769 i, k, j); 1770 if (r) 1771 return r; 1772 ring_id++; 1773 } 1774 } 1775 } 1776 } 1777 1778 if (adev->gfx.num_compute_rings) { 1779 ring_id = 0; 1780 /* set up the compute queues - allocate horizontally across pipes */ 1781 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1782 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1783 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1784 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, 1785 k, j)) 1786 continue; 1787 1788 r = gfx_v11_0_compute_ring_init(adev, ring_id, 1789 i, k, j); 1790 if (r) 1791 return r; 1792 1793 ring_id++; 1794 } 1795 } 1796 } 1797 } 1798 1799 adev->gfx.gfx_supported_reset = 1800 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 1801 adev->gfx.compute_supported_reset = 1802 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 1803 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1804 case IP_VERSION(11, 0, 0): 1805 case IP_VERSION(11, 0, 2): 1806 case IP_VERSION(11, 0, 3): 1807 if ((adev->gfx.me_fw_version >= 2280) && 1808 (adev->gfx.mec_fw_version >= 2410)) { 1809 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1810 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1811 } 1812 break; 1813 default: 1814 break; 1815 } 1816 1817 if (!adev->enable_mes_kiq) { 1818 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); 1819 if (r) { 1820 DRM_ERROR("Failed to init KIQ BOs!\n"); 1821 return r; 1822 } 1823 1824 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1825 if (r) 1826 return r; 1827 } 1828 1829 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0); 1830 if (r) 1831 return r; 1832 1833 /* allocate visible FB for rlc auto-loading fw */ 1834 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1835 r = gfx_v11_0_rlc_autoload_buffer_init(adev); 1836 if (r) 1837 return r; 1838 } 1839 1840 r = gfx_v11_0_gpu_early_init(adev); 1841 if (r) 1842 return r; 1843 1844 if (amdgpu_gfx_ras_sw_init(adev)) { 1845 dev_err(adev->dev, "Failed to initialize gfx ras block!\n"); 1846 return -EINVAL; 1847 } 1848 1849 gfx_v11_0_alloc_ip_dump(adev); 1850 1851 r = amdgpu_gfx_sysfs_init(adev); 1852 if (r) 1853 return r; 1854 1855 return 0; 1856 } 1857 1858 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev) 1859 { 1860 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1861 &adev->gfx.pfp.pfp_fw_gpu_addr, 1862 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1863 1864 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1865 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1866 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1867 } 1868 1869 static void gfx_v11_0_me_fini(struct amdgpu_device *adev) 1870 { 1871 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1872 &adev->gfx.me.me_fw_gpu_addr, 1873 (void **)&adev->gfx.me.me_fw_ptr); 1874 1875 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1876 &adev->gfx.me.me_fw_data_gpu_addr, 1877 (void **)&adev->gfx.me.me_fw_data_ptr); 1878 } 1879 1880 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1881 { 1882 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1883 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1884 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1885 } 1886 1887 static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) 1888 { 1889 
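/*
 * Tear down in roughly the reverse order of sw_init: rings and MQDs
 * first, then KIQ, the cleaner shader, the CP/RLC firmware objects,
 * the RLC autoload buffer, sysfs and the IP-dump allocations.
 */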
int i; 1890 struct amdgpu_device *adev = ip_block->adev; 1891 1892 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1893 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1894 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1895 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1896 1897 amdgpu_gfx_mqd_sw_fini(adev, 0); 1898 1899 if (!adev->enable_mes_kiq) { 1900 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); 1901 amdgpu_gfx_kiq_fini(adev, 0); 1902 } 1903 1904 amdgpu_gfx_cleaner_shader_sw_fini(adev); 1905 1906 gfx_v11_0_pfp_fini(adev); 1907 gfx_v11_0_me_fini(adev); 1908 gfx_v11_0_rlc_fini(adev); 1909 gfx_v11_0_mec_fini(adev); 1910 1911 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1912 gfx_v11_0_rlc_autoload_buffer_fini(adev); 1913 1914 gfx_v11_0_free_microcode(adev); 1915 1916 amdgpu_gfx_sysfs_fini(adev); 1917 1918 kfree(adev->gfx.ip_dump_core); 1919 kfree(adev->gfx.ip_dump_compute_queues); 1920 kfree(adev->gfx.ip_dump_gfx_queues); 1921 1922 return 0; 1923 } 1924 1925 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1926 u32 sh_num, u32 instance, int xcc_id) 1927 { 1928 u32 data; 1929 1930 if (instance == 0xffffffff) 1931 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1932 INSTANCE_BROADCAST_WRITES, 1); 1933 else 1934 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1935 instance); 1936 1937 if (se_num == 0xffffffff) 1938 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1939 1); 1940 else 1941 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1942 1943 if (sh_num == 0xffffffff) 1944 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1945 1); 1946 else 1947 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1948 1949 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1950 } 1951 1952 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev) 1953 { 1954 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask; 1955 1956 gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE); 1957 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask, 1958 CC_GC_SA_UNIT_DISABLE, 1959 SA_DISABLE); 1960 gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE); 1961 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask, 1962 GC_USER_SA_UNIT_DISABLE, 1963 SA_DISABLE); 1964 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * 1965 adev->gfx.config.max_shader_engines); 1966 1967 return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask)); 1968 } 1969 1970 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1971 { 1972 u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask; 1973 u32 rb_mask; 1974 1975 gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1976 gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask, 1977 CC_RB_BACKEND_DISABLE, 1978 BACKEND_DISABLE); 1979 gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 1980 gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask, 1981 GC_USER_RB_BACKEND_DISABLE, 1982 BACKEND_DISABLE); 1983 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se * 1984 adev->gfx.config.max_shader_engines); 1985 1986 return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask)); 1987 } 1988 1989 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) 1990 { 1991 u32 rb_bitmap_per_sa; 1992 u32 rb_bitmap_width_per_sa; 1993 u32 max_sa; 1994 u32 active_sa_bitmap; 1995 u32 global_active_rb_bitmap; 1996 u32 active_rb_bitmap = 0; 1997 
u32 i; 1998 1999 /* query sa bitmap from SA_UNIT_DISABLE registers */ 2000 active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev); 2001 /* query rb bitmap from RB_BACKEND_DISABLE registers */ 2002 global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev); 2003 2004 /* generate active rb bitmap according to active sa bitmap */ 2005 max_sa = adev->gfx.config.max_shader_engines * 2006 adev->gfx.config.max_sh_per_se; 2007 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se / 2008 adev->gfx.config.max_sh_per_se; 2009 rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa); 2010 2011 for (i = 0; i < max_sa; i++) { 2012 if (active_sa_bitmap & (1 << i)) 2013 active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa)); 2014 } 2015 2016 active_rb_bitmap &= global_active_rb_bitmap; 2017 adev->gfx.config.backend_enable_mask = active_rb_bitmap; 2018 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); 2019 } 2020 2021 #define DEFAULT_SH_MEM_BASES (0x6000) 2022 #define LDS_APP_BASE 0x1 2023 #define SCRATCH_APP_BASE 0x2 2024 2025 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) 2026 { 2027 int i; 2028 uint32_t sh_mem_bases; 2029 uint32_t data; 2030 2031 /* 2032 * Configure apertures: 2033 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 2034 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 2035 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 2036 */ 2037 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | 2038 SCRATCH_APP_BASE; 2039 2040 mutex_lock(&adev->srbm_mutex); 2041 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 2042 soc21_grbm_select(adev, 0, 0, 0, i); 2043 /* CP and shaders */ 2044 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 2045 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); 2046 2047 /* Enable trap for each kfd vmid. */ 2048 data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); 2049 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 2050 WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); 2051 } 2052 soc21_grbm_select(adev, 0, 0, 0, 0); 2053 mutex_unlock(&adev->srbm_mutex); 2054 2055 /* 2056 * Initialize all compute VMIDs to have no GDS, GWS, or OA 2057 * access. These should be enabled by FW for target VMIDs. 2058 */ 2059 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 2060 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); 2061 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); 2062 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); 2063 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); 2064 } 2065 } 2066 2067 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev) 2068 { 2069 int vmid; 2070 2071 /* 2072 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 2073 * access. Compute VMIDs should be enabled by FW for target VMIDs, 2074 * the driver can enable them for graphics. VMID0 should maintain 2075 * access so that HWS firmware can save/restore entries. 2076 */ 2077 for (vmid = 1; vmid < 16; vmid++) { 2078 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); 2079 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); 2080 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); 2081 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); 2082 } 2083 } 2084 2085 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev) 2086 { 2087 /* TODO: harvest feature to be added later. 
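 * (gfx_v10 has a gfx_v10_0_tcp_harvest() that masks off harvested TCPs
 * per SA; presumably something similar is intended here once it is
 * wired up.)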
*/ 2088 } 2089 2090 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev) 2091 { 2092 /* TCCs are global (not instanced). */ 2093 uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) | 2094 RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE); 2095 2096 adev->gfx.config.tcc_disabled_mask = 2097 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | 2098 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16); 2099 } 2100 2101 static void gfx_v11_0_constants_init(struct amdgpu_device *adev) 2102 { 2103 u32 tmp; 2104 int i; 2105 2106 if (!amdgpu_sriov_vf(adev)) 2107 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 2108 2109 gfx_v11_0_setup_rb(adev); 2110 gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info); 2111 gfx_v11_0_get_tcc_info(adev); 2112 adev->gfx.config.pa_sc_tile_steering_override = 0; 2113 2114 /* Set whether texture coordinate truncation is conformant. */ 2115 tmp = RREG32_SOC15(GC, 0, regTA_CNTL2); 2116 adev->gfx.config.ta_cntl2_truncate_coord_mode = 2117 REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE); 2118 2119 /* XXX SH_MEM regs */ 2120 /* where to put LDS, scratch, GPUVM in FSA64 space */ 2121 mutex_lock(&adev->srbm_mutex); 2122 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { 2123 soc21_grbm_select(adev, 0, 0, 0, i); 2124 /* CP and shaders */ 2125 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 2126 if (i != 0) { 2127 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 2128 (adev->gmc.private_aperture_start >> 48)); 2129 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 2130 (adev->gmc.shared_aperture_start >> 48)); 2131 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); 2132 } 2133 } 2134 soc21_grbm_select(adev, 0, 0, 0, 0); 2135 2136 mutex_unlock(&adev->srbm_mutex); 2137 2138 gfx_v11_0_init_compute_vmid(adev); 2139 gfx_v11_0_init_gds_vmid(adev); 2140 } 2141 2142 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev, 2143 int me, int pipe) 2144 { 2145 if (me != 0) 2146 return 0; 2147 2148 switch (pipe) { 2149 case 0: 2150 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 2151 case 1: 2152 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 2153 default: 2154 return 0; 2155 } 2156 } 2157 2158 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev, 2159 int me, int pipe) 2160 { 2161 /* 2162 * amdgpu controls only the first MEC. That's why this function only 2163 * handles the setting of interrupts for this specific MEC. All other 2164 * pipes' interrupts are set by amdkfd. 2165 */ 2166 if (me != 1) 2167 return 0; 2168 2169 switch (pipe) { 2170 case 0: 2171 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 2172 case 1: 2173 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 2174 case 2: 2175 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 2176 case 3: 2177 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 2178 default: 2179 return 0; 2180 } 2181 } 2182 2183 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 2184 bool enable) 2185 { 2186 u32 tmp, cp_int_cntl_reg; 2187 int i, j; 2188 2189 if (amdgpu_sriov_vf(adev)) 2190 return; 2191 2192 for (i = 0; i < adev->gfx.me.num_me; i++) { 2193 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 2194 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 2195 2196 if (cp_int_cntl_reg) { 2197 tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 2198 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 2199 enable ? 
1 : 0); 2200 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 2201 enable ? 1 : 0); 2202 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 2203 enable ? 1 : 0); 2204 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 2205 enable ? 1 : 0); 2206 WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp); 2207 } 2208 } 2209 } 2210 } 2211 2212 static int gfx_v11_0_init_csb(struct amdgpu_device *adev) 2213 { 2214 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 2215 2216 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 2217 adev->gfx.rlc.clear_state_gpu_addr >> 32); 2218 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 2219 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 2220 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 2221 2222 return 0; 2223 } 2224 2225 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) 2226 { 2227 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 2228 2229 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 2230 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 2231 } 2232 2233 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) 2234 { 2235 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2236 udelay(50); 2237 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2238 udelay(50); 2239 } 2240 2241 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 2242 bool enable) 2243 { 2244 uint32_t rlc_pg_cntl; 2245 2246 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 2247 2248 if (!enable) { 2249 /* RLC_PG_CNTL[23] = 0 (default) 2250 * RLC will wait for handshake acks with SMU 2251 * GFXOFF will be enabled 2252 * RLC_PG_CNTL[23] = 1 2253 * RLC will not issue any message to SMU 2254 * hence no handshake between SMU & RLC 2255 * GFXOFF will be disabled 2256 */ 2257 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2258 } else 2259 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2260 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 2261 } 2262 2263 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) 2264 { 2265 /* TODO: enable rlc & smu handshake until smu 2266 * and gfxoff feature works as expected */ 2267 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 2268 gfx_v11_0_rlc_smu_handshake_cntl(adev, false); 2269 2270 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2271 udelay(50); 2272 } 2273 2274 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) 2275 { 2276 uint32_t tmp; 2277 2278 /* enable Save Restore Machine */ 2279 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 2280 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 2281 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 2282 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 2283 } 2284 2285 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) 2286 { 2287 const struct rlc_firmware_header_v2_0 *hdr; 2288 const __le32 *fw_data; 2289 unsigned i, fw_size; 2290 2291 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2292 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2293 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2294 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2295 2296 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 2297 RLCG_UCODE_LOADING_START_ADDRESS); 2298 2299 for (i = 0; i < fw_size; i++) 2300 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 2301 le32_to_cpup(fw_data++)); 2302 2303 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2304 } 2305 2306 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct 
amdgpu_device *adev) 2307 { 2308 const struct rlc_firmware_header_v2_2 *hdr; 2309 const __le32 *fw_data; 2310 unsigned i, fw_size; 2311 u32 tmp; 2312 2313 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 2314 2315 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2316 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 2317 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 2318 2319 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 2320 2321 for (i = 0; i < fw_size; i++) { 2322 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2323 msleep(1); 2324 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 2325 le32_to_cpup(fw_data++)); 2326 } 2327 2328 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2329 2330 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2331 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 2332 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 2333 2334 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 2335 for (i = 0; i < fw_size; i++) { 2336 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2337 msleep(1); 2338 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 2339 le32_to_cpup(fw_data++)); 2340 } 2341 2342 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2343 2344 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 2345 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 2346 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 2347 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 2348 } 2349 2350 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) 2351 { 2352 const struct rlc_firmware_header_v2_3 *hdr; 2353 const __le32 *fw_data; 2354 unsigned i, fw_size; 2355 u32 tmp; 2356 2357 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 2358 2359 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2360 le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); 2361 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; 2362 2363 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); 2364 2365 for (i = 0; i < fw_size; i++) { 2366 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2367 msleep(1); 2368 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, 2369 le32_to_cpup(fw_data++)); 2370 } 2371 2372 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); 2373 2374 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 2375 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 2376 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); 2377 2378 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2379 le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); 2380 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; 2381 2382 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); 2383 2384 for (i = 0; i < fw_size; i++) { 2385 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2386 msleep(1); 2387 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, 2388 le32_to_cpup(fw_data++)); 2389 } 2390 2391 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); 2392 2393 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); 2394 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); 2395 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); 2396 } 2397 2398 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) 2399 { 2400 const struct rlc_firmware_header_v2_0 *hdr; 2401 uint16_t version_major; 2402 uint16_t version_minor; 2403 2404 if (!adev->gfx.rlc_fw) 2405 return -EINVAL; 2406 2407 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2408 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2409 2410 
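/*
 * The RLC image uses a v2.x header: v2.0 carries the RLC-G ucode loaded by
 * gfx_v11_0_load_rlcg_microcode(), v2.2 adds the LX6 IRAM/DRAM images and
 * v2.3 adds the RLC-P/RLC-V images, so the minor version (together with
 * amdgpu_dpm) decides which of the extra loaders below are run.
 */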
version_major = le16_to_cpu(hdr->header.header_version_major); 2411 version_minor = le16_to_cpu(hdr->header.header_version_minor); 2412 2413 if (version_major == 2) { 2414 gfx_v11_0_load_rlcg_microcode(adev); 2415 if (amdgpu_dpm == 1) { 2416 if (version_minor >= 2) 2417 gfx_v11_0_load_rlc_iram_dram_microcode(adev); 2418 if (version_minor == 3) 2419 gfx_v11_0_load_rlcp_rlcv_microcode(adev); 2420 } 2421 2422 return 0; 2423 } 2424 2425 return -EINVAL; 2426 } 2427 2428 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev) 2429 { 2430 int r; 2431 2432 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2433 gfx_v11_0_init_csb(adev); 2434 2435 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 2436 gfx_v11_0_rlc_enable_srm(adev); 2437 } else { 2438 if (amdgpu_sriov_vf(adev)) { 2439 gfx_v11_0_init_csb(adev); 2440 return 0; 2441 } 2442 2443 adev->gfx.rlc.funcs->stop(adev); 2444 2445 /* disable CG */ 2446 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 2447 2448 /* disable PG */ 2449 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 2450 2451 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2452 /* legacy rlc firmware loading */ 2453 r = gfx_v11_0_rlc_load_microcode(adev); 2454 if (r) 2455 return r; 2456 } 2457 2458 gfx_v11_0_init_csb(adev); 2459 2460 adev->gfx.rlc.funcs->start(adev); 2461 } 2462 return 0; 2463 } 2464 2465 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) 2466 { 2467 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2468 uint32_t tmp; 2469 int i; 2470 2471 /* Trigger an invalidation of the L1 instruction caches */ 2472 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2473 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2474 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2475 2476 /* Wait for invalidation complete */ 2477 for (i = 0; i < usec_timeout; i++) { 2478 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2479 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2480 INVALIDATE_CACHE_COMPLETE)) 2481 break; 2482 udelay(1); 2483 } 2484 2485 if (i >= usec_timeout) { 2486 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2487 return -EINVAL; 2488 } 2489 2490 if (amdgpu_emu_mode == 1) 2491 amdgpu_device_flush_hdp(adev, NULL); 2492 2493 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2494 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2495 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2496 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2497 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2498 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2499 2500 /* Program me ucode address into instruction cache address register */ 2501 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2502 lower_32_bits(addr) & 0xFFFFF000); 2503 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2504 upper_32_bits(addr)); 2505 2506 return 0; 2507 } 2508 2509 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) 2510 { 2511 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2512 uint32_t tmp; 2513 int i; 2514 2515 /* Trigger an invalidation of the L1 instruction caches */ 2516 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2517 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2518 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2519 2520 /* Wait for invalidation complete */ 2521 for (i = 0; i < usec_timeout; i++) { 2522 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2523 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2524 INVALIDATE_CACHE_COMPLETE)) 2525 break; 2526 
udelay(1); 2527 } 2528 2529 if (i >= usec_timeout) { 2530 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2531 return -EINVAL; 2532 } 2533 2534 if (amdgpu_emu_mode == 1) 2535 amdgpu_device_flush_hdp(adev, NULL); 2536 2537 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2538 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2539 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2540 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2541 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2542 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2543 2544 /* Program pfp ucode address into intruction cache address register */ 2545 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2546 lower_32_bits(addr) & 0xFFFFF000); 2547 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2548 upper_32_bits(addr)); 2549 2550 return 0; 2551 } 2552 2553 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) 2554 { 2555 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2556 uint32_t tmp; 2557 int i; 2558 2559 /* Trigger an invalidation of the L1 instruction caches */ 2560 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2561 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2562 2563 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2564 2565 /* Wait for invalidation complete */ 2566 for (i = 0; i < usec_timeout; i++) { 2567 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2568 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2569 INVALIDATE_CACHE_COMPLETE)) 2570 break; 2571 udelay(1); 2572 } 2573 2574 if (i >= usec_timeout) { 2575 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2576 return -EINVAL; 2577 } 2578 2579 if (amdgpu_emu_mode == 1) 2580 amdgpu_device_flush_hdp(adev, NULL); 2581 2582 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2583 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2584 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2585 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2586 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2587 2588 /* Program mec1 ucode address into intruction cache address register */ 2589 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2590 lower_32_bits(addr) & 0xFFFFF000); 2591 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2592 upper_32_bits(addr)); 2593 2594 return 0; 2595 } 2596 2597 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2598 { 2599 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2600 uint32_t tmp; 2601 unsigned i, pipe_id; 2602 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2603 2604 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2605 adev->gfx.pfp_fw->data; 2606 2607 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2608 lower_32_bits(addr)); 2609 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2610 upper_32_bits(addr)); 2611 2612 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2613 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2614 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2615 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2616 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2617 2618 /* 2619 * Programming any of the CP_PFP_IC_BASE registers 2620 * forces invalidation of the ME L1 I$. 
Wait for the 2621 * invalidation complete 2622 */ 2623 for (i = 0; i < usec_timeout; i++) { 2624 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2625 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2626 INVALIDATE_CACHE_COMPLETE)) 2627 break; 2628 udelay(1); 2629 } 2630 2631 if (i >= usec_timeout) { 2632 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2633 return -EINVAL; 2634 } 2635 2636 /* Prime the L1 instruction caches */ 2637 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2638 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2639 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2640 /* Waiting for cache primed*/ 2641 for (i = 0; i < usec_timeout; i++) { 2642 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2643 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2644 ICACHE_PRIMED)) 2645 break; 2646 udelay(1); 2647 } 2648 2649 if (i >= usec_timeout) { 2650 dev_err(adev->dev, "failed to prime instruction cache\n"); 2651 return -EINVAL; 2652 } 2653 2654 mutex_lock(&adev->srbm_mutex); 2655 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2656 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2657 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2658 (pfp_hdr->ucode_start_addr_hi << 30) | 2659 (pfp_hdr->ucode_start_addr_lo >> 2)); 2660 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2661 pfp_hdr->ucode_start_addr_hi >> 2); 2662 2663 /* 2664 * Program CP_ME_CNTL to reset given PIPE to take 2665 * effect of CP_PFP_PRGRM_CNTR_START. 2666 */ 2667 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2668 if (pipe_id == 0) 2669 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2670 PFP_PIPE0_RESET, 1); 2671 else 2672 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2673 PFP_PIPE1_RESET, 1); 2674 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2675 2676 /* Clear pfp pipe0 reset bit. 
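 * Releasing the reset lets the pipe start fetching from the
 * CP_PFP_PRGRM_CNTR_START address programmed above.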
*/ 2677 if (pipe_id == 0) 2678 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2679 PFP_PIPE0_RESET, 0); 2680 else 2681 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2682 PFP_PIPE1_RESET, 0); 2683 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2684 2685 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2686 lower_32_bits(addr2)); 2687 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2688 upper_32_bits(addr2)); 2689 } 2690 soc21_grbm_select(adev, 0, 0, 0, 0); 2691 mutex_unlock(&adev->srbm_mutex); 2692 2693 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2694 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2695 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2696 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2697 2698 /* Invalidate the data caches */ 2699 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2700 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2701 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2702 2703 for (i = 0; i < usec_timeout; i++) { 2704 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2705 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2706 INVALIDATE_DCACHE_COMPLETE)) 2707 break; 2708 udelay(1); 2709 } 2710 2711 if (i >= usec_timeout) { 2712 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2713 return -EINVAL; 2714 } 2715 2716 return 0; 2717 } 2718 2719 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2720 { 2721 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2722 uint32_t tmp; 2723 unsigned i, pipe_id; 2724 const struct gfx_firmware_header_v2_0 *me_hdr; 2725 2726 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2727 adev->gfx.me_fw->data; 2728 2729 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2730 lower_32_bits(addr)); 2731 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2732 upper_32_bits(addr)); 2733 2734 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2735 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2736 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2737 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2738 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2739 2740 /* 2741 * Programming any of the CP_ME_IC_BASE registers 2742 * forces invalidation of the ME L1 I$. 
Wait for the 2743 * invalidation complete 2744 */ 2745 for (i = 0; i < usec_timeout; i++) { 2746 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2747 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2748 INVALIDATE_CACHE_COMPLETE)) 2749 break; 2750 udelay(1); 2751 } 2752 2753 if (i >= usec_timeout) { 2754 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2755 return -EINVAL; 2756 } 2757 2758 /* Prime the instruction caches */ 2759 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2760 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2761 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2762 2763 /* Wait for the instruction cache to be primed */ 2764 for (i = 0; i < usec_timeout; i++) { 2765 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2766 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2767 ICACHE_PRIMED)) 2768 break; 2769 udelay(1); 2770 } 2771 2772 if (i >= usec_timeout) { 2773 dev_err(adev->dev, "failed to prime instruction cache\n"); 2774 return -EINVAL; 2775 } 2776 2777 mutex_lock(&adev->srbm_mutex); 2778 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2779 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2780 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2781 (me_hdr->ucode_start_addr_hi << 30) | 2782 (me_hdr->ucode_start_addr_lo >> 2)); 2783 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2784 me_hdr->ucode_start_addr_hi >> 2); 2785 2786 /* 2787 * Program CP_ME_CNTL to reset the given PIPE so that 2788 * CP_ME_PRGRM_CNTR_START takes effect. 2789 */ 2790 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2791 if (pipe_id == 0) 2792 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2793 ME_PIPE0_RESET, 1); 2794 else 2795 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2796 ME_PIPE1_RESET, 1); 2797 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2798 2799 /* Clear the me pipe reset bit. 
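 * As with the PFP pipes above, releasing the reset makes the pipe resume
 * from CP_ME_PRGRM_CNTR_START(_HI), which holds the 64-bit ucode start
 * address expressed in dwords (byte address >> 2).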
*/ 2800 if (pipe_id == 0) 2801 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2802 ME_PIPE0_RESET, 0); 2803 else 2804 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2805 ME_PIPE1_RESET, 0); 2806 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2807 2808 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2809 lower_32_bits(addr2)); 2810 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2811 upper_32_bits(addr2)); 2812 } 2813 soc21_grbm_select(adev, 0, 0, 0, 0); 2814 mutex_unlock(&adev->srbm_mutex); 2815 2816 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2817 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2818 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2819 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2820 2821 /* Invalidate the data caches */ 2822 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2823 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2824 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2825 2826 for (i = 0; i < usec_timeout; i++) { 2827 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2828 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2829 INVALIDATE_DCACHE_COMPLETE)) 2830 break; 2831 udelay(1); 2832 } 2833 2834 if (i >= usec_timeout) { 2835 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2836 return -EINVAL; 2837 } 2838 2839 return 0; 2840 } 2841 2842 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2843 { 2844 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2845 uint32_t tmp; 2846 unsigned i; 2847 const struct gfx_firmware_header_v2_0 *mec_hdr; 2848 2849 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2850 adev->gfx.mec_fw->data; 2851 2852 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2853 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2854 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2855 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2856 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2857 2858 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2859 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2860 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2861 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2862 2863 mutex_lock(&adev->srbm_mutex); 2864 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2865 soc21_grbm_select(adev, 1, i, 0, 0); 2866 2867 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); 2868 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2869 upper_32_bits(addr2)); 2870 2871 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2872 mec_hdr->ucode_start_addr_lo >> 2 | 2873 mec_hdr->ucode_start_addr_hi << 30); 2874 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2875 mec_hdr->ucode_start_addr_hi >> 2); 2876 2877 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); 2878 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2879 upper_32_bits(addr)); 2880 } 2881 mutex_unlock(&adev->srbm_mutex); 2882 soc21_grbm_select(adev, 0, 0, 0, 0); 2883 2884 /* Trigger an invalidation of the L1 instruction caches */ 2885 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2886 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2887 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2888 2889 /* Wait for invalidation complete */ 2890 for (i = 0; i < usec_timeout; i++) { 2891 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2892 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2893 INVALIDATE_DCACHE_COMPLETE)) 2894 break; 2895 udelay(1); 2896 } 2897 2898 if (i >= 
usec_timeout) { 2899 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2900 return -EINVAL; 2901 } 2902 2903 /* Trigger an invalidation of the L1 instruction caches */ 2904 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2905 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2906 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2907 2908 /* Wait for invalidation complete */ 2909 for (i = 0; i < usec_timeout; i++) { 2910 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2911 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2912 INVALIDATE_CACHE_COMPLETE)) 2913 break; 2914 udelay(1); 2915 } 2916 2917 if (i >= usec_timeout) { 2918 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2919 return -EINVAL; 2920 } 2921 2922 return 0; 2923 } 2924 2925 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) 2926 { 2927 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2928 const struct gfx_firmware_header_v2_0 *me_hdr; 2929 const struct gfx_firmware_header_v2_0 *mec_hdr; 2930 uint32_t pipe_id, tmp; 2931 2932 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2933 adev->gfx.mec_fw->data; 2934 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2935 adev->gfx.me_fw->data; 2936 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2937 adev->gfx.pfp_fw->data; 2938 2939 /* config pfp program start addr */ 2940 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2941 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2942 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2943 (pfp_hdr->ucode_start_addr_hi << 30) | 2944 (pfp_hdr->ucode_start_addr_lo >> 2)); 2945 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2946 pfp_hdr->ucode_start_addr_hi >> 2); 2947 } 2948 soc21_grbm_select(adev, 0, 0, 0, 0); 2949 2950 /* reset pfp pipe */ 2951 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2952 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2953 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2954 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2955 2956 /* clear pfp pipe reset */ 2957 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2958 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2959 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2960 2961 /* config me program start addr */ 2962 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2963 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2964 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2965 (me_hdr->ucode_start_addr_hi << 30) | 2966 (me_hdr->ucode_start_addr_lo >> 2) ); 2967 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2968 me_hdr->ucode_start_addr_hi>>2); 2969 } 2970 soc21_grbm_select(adev, 0, 0, 0, 0); 2971 2972 /* reset me pipe */ 2973 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2974 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2975 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2976 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2977 2978 /* clear me pipe reset */ 2979 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2980 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2981 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2982 2983 /* config mec program start addr */ 2984 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2985 soc21_grbm_select(adev, 1, pipe_id, 0, 0); 2986 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2987 mec_hdr->ucode_start_addr_lo >> 2 | 2988 mec_hdr->ucode_start_addr_hi << 30); 2989 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2990 mec_hdr->ucode_start_addr_hi >> 2); 2991 } 2992 soc21_grbm_select(adev, 0, 0, 0, 0); 2993 2994 /* reset mec pipe */ 2995 tmp = 
RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2996 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 2997 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 2998 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 2999 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 3000 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 3001 3002 /* clear mec pipe reset */ 3003 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 3004 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 3005 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 3006 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 3007 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 3008 } 3009 3010 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 3011 { 3012 uint32_t cp_status; 3013 uint32_t bootload_status; 3014 int i, r; 3015 uint64_t addr, addr2; 3016 3017 for (i = 0; i < adev->usec_timeout; i++) { 3018 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 3019 3020 if (amdgpu_ip_version(adev, GC_HWIP, 0) == 3021 IP_VERSION(11, 0, 1) || 3022 amdgpu_ip_version(adev, GC_HWIP, 0) == 3023 IP_VERSION(11, 0, 4) || 3024 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) || 3025 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) || 3026 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) || 3027 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3)) 3028 bootload_status = RREG32_SOC15(GC, 0, 3029 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 3030 else 3031 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 3032 3033 if ((cp_status == 0) && 3034 (REG_GET_FIELD(bootload_status, 3035 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 3036 break; 3037 } 3038 udelay(1); 3039 } 3040 3041 if (i >= adev->usec_timeout) { 3042 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 3043 return -ETIMEDOUT; 3044 } 3045 3046 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 3047 if (adev->gfx.rs64_enable) { 3048 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3049 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; 3050 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 3051 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; 3052 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); 3053 if (r) 3054 return r; 3055 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3056 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; 3057 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 3058 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; 3059 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); 3060 if (r) 3061 return r; 3062 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3063 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; 3064 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 3065 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; 3066 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); 3067 if (r) 3068 return r; 3069 } else { 3070 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3071 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; 3072 r = gfx_v11_0_config_me_cache(adev, addr); 3073 if (r) 3074 return r; 3075 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3076 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; 3077 r = gfx_v11_0_config_pfp_cache(adev, addr); 3078 if (r) 3079 return r; 3080 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 3081 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; 3082 r = gfx_v11_0_config_mec_cache(adev, addr); 3083 if (r) 
3084 return r; 3085 } 3086 } 3087 3088 return 0; 3089 } 3090 3091 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 3092 { 3093 int i; 3094 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3095 3096 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 3097 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 3098 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3099 3100 for (i = 0; i < adev->usec_timeout; i++) { 3101 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 3102 break; 3103 udelay(1); 3104 } 3105 3106 if (i >= adev->usec_timeout) 3107 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); 3108 3109 return 0; 3110 } 3111 3112 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 3113 { 3114 int r; 3115 const struct gfx_firmware_header_v1_0 *pfp_hdr; 3116 const __le32 *fw_data; 3117 unsigned i, fw_size; 3118 3119 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 3120 adev->gfx.pfp_fw->data; 3121 3122 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 3123 3124 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 3125 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 3126 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 3127 3128 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 3129 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3130 &adev->gfx.pfp.pfp_fw_obj, 3131 &adev->gfx.pfp.pfp_fw_gpu_addr, 3132 (void **)&adev->gfx.pfp.pfp_fw_ptr); 3133 if (r) { 3134 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 3135 gfx_v11_0_pfp_fini(adev); 3136 return r; 3137 } 3138 3139 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 3140 3141 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 3142 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 3143 3144 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); 3145 3146 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); 3147 3148 for (i = 0; i < pfp_hdr->jt_size; i++) 3149 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, 3150 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); 3151 3152 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 3153 3154 return 0; 3155 } 3156 3157 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 3158 { 3159 int r; 3160 const struct gfx_firmware_header_v2_0 *pfp_hdr; 3161 const __le32 *fw_ucode, *fw_data; 3162 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3163 uint32_t tmp; 3164 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3165 3166 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 3167 adev->gfx.pfp_fw->data; 3168 3169 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 3170 3171 /* instruction */ 3172 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 3173 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 3174 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 3175 /* data */ 3176 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 3177 le32_to_cpu(pfp_hdr->data_offset_bytes)); 3178 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 3179 3180 /* 64kb align */ 3181 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3182 64 * 1024, 3183 AMDGPU_GEM_DOMAIN_VRAM | 3184 AMDGPU_GEM_DOMAIN_GTT, 3185 &adev->gfx.pfp.pfp_fw_obj, 3186 &adev->gfx.pfp.pfp_fw_gpu_addr, 3187 (void **)&adev->gfx.pfp.pfp_fw_ptr); 3188 if (r) { 3189 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 3190 gfx_v11_0_pfp_fini(adev); 3191 return r; 3192 } 3193 3194 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3195 64 * 1024, 3196 AMDGPU_GEM_DOMAIN_VRAM | 3197 AMDGPU_GEM_DOMAIN_GTT, 3198 
&adev->gfx.pfp.pfp_fw_data_obj, 3199 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 3200 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 3201 if (r) { 3202 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 3203 gfx_v11_0_pfp_fini(adev); 3204 return r; 3205 } 3206 3207 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 3208 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 3209 3210 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 3211 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 3212 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 3213 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 3214 3215 if (amdgpu_emu_mode == 1) 3216 amdgpu_device_flush_hdp(adev, NULL); 3217 3218 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 3219 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 3220 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 3221 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 3222 3223 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 3224 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 3225 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 3226 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 3227 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 3228 3229 /* 3230 * Programming any of the CP_PFP_IC_BASE registers 3231 * forces invalidation of the ME L1 I$. Wait for the 3232 * invalidation complete 3233 */ 3234 for (i = 0; i < usec_timeout; i++) { 3235 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 3236 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 3237 INVALIDATE_CACHE_COMPLETE)) 3238 break; 3239 udelay(1); 3240 } 3241 3242 if (i >= usec_timeout) { 3243 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3244 return -EINVAL; 3245 } 3246 3247 /* Prime the L1 instruction caches */ 3248 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 3249 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 3250 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 3251 /* Waiting for cache primed*/ 3252 for (i = 0; i < usec_timeout; i++) { 3253 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 3254 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 3255 ICACHE_PRIMED)) 3256 break; 3257 udelay(1); 3258 } 3259 3260 if (i >= usec_timeout) { 3261 dev_err(adev->dev, "failed to prime instruction cache\n"); 3262 return -EINVAL; 3263 } 3264 3265 mutex_lock(&adev->srbm_mutex); 3266 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3267 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3268 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 3269 (pfp_hdr->ucode_start_addr_hi << 30) | 3270 (pfp_hdr->ucode_start_addr_lo >> 2) ); 3271 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 3272 pfp_hdr->ucode_start_addr_hi>>2); 3273 3274 /* 3275 * Program CP_ME_CNTL to reset given PIPE to take 3276 * effect of CP_PFP_PRGRM_CNTR_START. 3277 */ 3278 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3279 if (pipe_id == 0) 3280 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3281 PFP_PIPE0_RESET, 1); 3282 else 3283 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3284 PFP_PIPE1_RESET, 1); 3285 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3286 3287 /* Clear pfp pipe0 reset bit. 
*/ 3288 if (pipe_id == 0) 3289 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3290 PFP_PIPE0_RESET, 0); 3291 else 3292 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3293 PFP_PIPE1_RESET, 0); 3294 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3295 3296 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 3297 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3298 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 3299 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3300 } 3301 soc21_grbm_select(adev, 0, 0, 0, 0); 3302 mutex_unlock(&adev->srbm_mutex); 3303 3304 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3305 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3306 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3307 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3308 3309 /* Invalidate the data caches */ 3310 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3311 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3312 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3313 3314 for (i = 0; i < usec_timeout; i++) { 3315 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3316 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3317 INVALIDATE_DCACHE_COMPLETE)) 3318 break; 3319 udelay(1); 3320 } 3321 3322 if (i >= usec_timeout) { 3323 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3324 return -EINVAL; 3325 } 3326 3327 return 0; 3328 } 3329 3330 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) 3331 { 3332 int r; 3333 const struct gfx_firmware_header_v1_0 *me_hdr; 3334 const __le32 *fw_data; 3335 unsigned i, fw_size; 3336 3337 me_hdr = (const struct gfx_firmware_header_v1_0 *) 3338 adev->gfx.me_fw->data; 3339 3340 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3341 3342 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3343 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 3344 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); 3345 3346 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, 3347 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3348 &adev->gfx.me.me_fw_obj, 3349 &adev->gfx.me.me_fw_gpu_addr, 3350 (void **)&adev->gfx.me.me_fw_ptr); 3351 if (r) { 3352 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); 3353 gfx_v11_0_me_fini(adev); 3354 return r; 3355 } 3356 3357 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); 3358 3359 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3360 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3361 3362 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr); 3363 3364 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0); 3365 3366 for (i = 0; i < me_hdr->jt_size; i++) 3367 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA, 3368 le32_to_cpup(fw_data + me_hdr->jt_offset + i)); 3369 3370 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); 3371 3372 return 0; 3373 } 3374 3375 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 3376 { 3377 int r; 3378 const struct gfx_firmware_header_v2_0 *me_hdr; 3379 const __le32 *fw_ucode, *fw_data; 3380 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3381 uint32_t tmp; 3382 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3383 3384 me_hdr = (const struct gfx_firmware_header_v2_0 *) 3385 adev->gfx.me_fw->data; 3386 3387 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3388 3389 /* instruction */ 3390 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 3391 le32_to_cpu(me_hdr->ucode_offset_bytes)); 3392 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 3393 /* data */ 3394 
fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3395 le32_to_cpu(me_hdr->data_offset_bytes)); 3396 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 3397 3398 /* 64kb align*/ 3399 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3400 64 * 1024, 3401 AMDGPU_GEM_DOMAIN_VRAM | 3402 AMDGPU_GEM_DOMAIN_GTT, 3403 &adev->gfx.me.me_fw_obj, 3404 &adev->gfx.me.me_fw_gpu_addr, 3405 (void **)&adev->gfx.me.me_fw_ptr); 3406 if (r) { 3407 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 3408 gfx_v11_0_me_fini(adev); 3409 return r; 3410 } 3411 3412 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3413 64 * 1024, 3414 AMDGPU_GEM_DOMAIN_VRAM | 3415 AMDGPU_GEM_DOMAIN_GTT, 3416 &adev->gfx.me.me_fw_data_obj, 3417 &adev->gfx.me.me_fw_data_gpu_addr, 3418 (void **)&adev->gfx.me.me_fw_data_ptr); 3419 if (r) { 3420 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 3421 gfx_v11_0_pfp_fini(adev); 3422 return r; 3423 } 3424 3425 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 3426 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 3427 3428 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3429 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 3430 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3431 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 3432 3433 if (amdgpu_emu_mode == 1) 3434 amdgpu_device_flush_hdp(adev, NULL); 3435 3436 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 3437 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3438 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 3439 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3440 3441 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 3442 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 3443 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 3444 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 3445 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 3446 3447 /* 3448 * Programming any of the CP_ME_IC_BASE registers 3449 * forces invalidation of the ME L1 I$. Wait for the 3450 * invalidation complete 3451 */ 3452 for (i = 0; i < usec_timeout; i++) { 3453 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3454 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3455 INVALIDATE_CACHE_COMPLETE)) 3456 break; 3457 udelay(1); 3458 } 3459 3460 if (i >= usec_timeout) { 3461 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3462 return -EINVAL; 3463 } 3464 3465 /* Prime the instruction caches */ 3466 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3467 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 3468 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 3469 3470 /* Waiting for instruction cache primed*/ 3471 for (i = 0; i < usec_timeout; i++) { 3472 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3473 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3474 ICACHE_PRIMED)) 3475 break; 3476 udelay(1); 3477 } 3478 3479 if (i >= usec_timeout) { 3480 dev_err(adev->dev, "failed to prime instruction cache\n"); 3481 return -EINVAL; 3482 } 3483 3484 mutex_lock(&adev->srbm_mutex); 3485 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3486 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3487 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 3488 (me_hdr->ucode_start_addr_hi << 30) | 3489 (me_hdr->ucode_start_addr_lo >> 2) ); 3490 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 3491 me_hdr->ucode_start_addr_hi>>2); 3492 3493 /* 3494 * Program CP_ME_CNTL to reset given PIPE to take 3495 * effect of CP_PFP_PRGRM_CNTR_START. 
3496 */ 3497 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3498 if (pipe_id == 0) 3499 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3500 ME_PIPE0_RESET, 1); 3501 else 3502 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3503 ME_PIPE1_RESET, 1); 3504 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3505 3506 /* Clear pfp pipe0 reset bit. */ 3507 if (pipe_id == 0) 3508 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3509 ME_PIPE0_RESET, 0); 3510 else 3511 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3512 ME_PIPE1_RESET, 0); 3513 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3514 3515 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 3516 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3517 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 3518 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3519 } 3520 soc21_grbm_select(adev, 0, 0, 0, 0); 3521 mutex_unlock(&adev->srbm_mutex); 3522 3523 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3524 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3525 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3526 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3527 3528 /* Invalidate the data caches */ 3529 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3530 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3531 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3532 3533 for (i = 0; i < usec_timeout; i++) { 3534 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3535 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3536 INVALIDATE_DCACHE_COMPLETE)) 3537 break; 3538 udelay(1); 3539 } 3540 3541 if (i >= usec_timeout) { 3542 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3543 return -EINVAL; 3544 } 3545 3546 return 0; 3547 } 3548 3549 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 3550 { 3551 int r; 3552 3553 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 3554 return -EINVAL; 3555 3556 gfx_v11_0_cp_gfx_enable(adev, false); 3557 3558 if (adev->gfx.rs64_enable) 3559 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev); 3560 else 3561 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev); 3562 if (r) { 3563 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 3564 return r; 3565 } 3566 3567 if (adev->gfx.rs64_enable) 3568 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev); 3569 else 3570 r = gfx_v11_0_cp_gfx_load_me_microcode(adev); 3571 if (r) { 3572 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 3573 return r; 3574 } 3575 3576 return 0; 3577 } 3578 3579 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev) 3580 { 3581 struct amdgpu_ring *ring; 3582 const struct cs_section_def *sect = NULL; 3583 const struct cs_extent_def *ext = NULL; 3584 int r, i; 3585 int ctx_reg_offset; 3586 3587 /* init the CP */ 3588 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 3589 adev->gfx.config.max_hw_contexts - 1); 3590 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 3591 3592 if (!amdgpu_async_gfx_ring) 3593 gfx_v11_0_cp_gfx_enable(adev, true); 3594 3595 ring = &adev->gfx.gfx_ring[0]; 3596 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); 3597 if (r) { 3598 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3599 return r; 3600 } 3601 3602 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3603 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 3604 3605 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 3606 amdgpu_ring_write(ring, 0x80000000); 3607 amdgpu_ring_write(ring, 0x80000000); 3608 3609 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 3610 for (ext = sect->section; 
ext->extent != NULL; ++ext) { 3611 if (sect->id == SECT_CONTEXT) { 3612 amdgpu_ring_write(ring, 3613 PACKET3(PACKET3_SET_CONTEXT_REG, 3614 ext->reg_count)); 3615 amdgpu_ring_write(ring, ext->reg_index - 3616 PACKET3_SET_CONTEXT_REG_START); 3617 for (i = 0; i < ext->reg_count; i++) 3618 amdgpu_ring_write(ring, ext->extent[i]); 3619 } 3620 } 3621 } 3622 3623 ctx_reg_offset = 3624 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 3625 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 3626 amdgpu_ring_write(ring, ctx_reg_offset); 3627 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 3628 3629 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3630 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 3631 3632 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3633 amdgpu_ring_write(ring, 0); 3634 3635 amdgpu_ring_commit(ring); 3636 3637 /* submit cs packet to copy state 0 to next available state */ 3638 if (adev->gfx.num_gfx_rings > 1) { 3639 /* maximum supported gfx ring is 2 */ 3640 ring = &adev->gfx.gfx_ring[1]; 3641 r = amdgpu_ring_alloc(ring, 2); 3642 if (r) { 3643 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3644 return r; 3645 } 3646 3647 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3648 amdgpu_ring_write(ring, 0); 3649 3650 amdgpu_ring_commit(ring); 3651 } 3652 return 0; 3653 } 3654 3655 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 3656 CP_PIPE_ID pipe) 3657 { 3658 u32 tmp; 3659 3660 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 3661 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 3662 3663 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 3664 } 3665 3666 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 3667 struct amdgpu_ring *ring) 3668 { 3669 u32 tmp; 3670 3671 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3672 if (ring->use_doorbell) { 3673 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3674 DOORBELL_OFFSET, ring->doorbell_index); 3675 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3676 DOORBELL_EN, 1); 3677 } else { 3678 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3679 DOORBELL_EN, 0); 3680 } 3681 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 3682 3683 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 3684 DOORBELL_RANGE_LOWER, ring->doorbell_index); 3685 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 3686 3687 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3688 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 3689 } 3690 3691 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) 3692 { 3693 struct amdgpu_ring *ring; 3694 u32 tmp; 3695 u32 rb_bufsz; 3696 u64 rb_addr, rptr_addr, wptr_gpu_addr; 3697 3698 /* Set the write pointer delay */ 3699 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 3700 3701 /* set the RB to use vmid 0 */ 3702 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 3703 3704 /* Init gfx ring 0 for pipe 0 */ 3705 mutex_lock(&adev->srbm_mutex); 3706 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3707 3708 /* Set ring buffer size */ 3709 ring = &adev->gfx.gfx_ring[0]; 3710 rb_bufsz = order_base_2(ring->ring_size / 8); 3711 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 3712 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 3713 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3714 3715 /* Initialize the ring buffer's write pointers */ 3716 ring->wptr = 0; 3717 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3718 WREG32_SOC15(GC, 0, 
regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3719 3720 /* set the wb address whether it's enabled or not */ 3721 rptr_addr = ring->rptr_gpu_addr; 3722 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 3723 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3724 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3725 3726 wptr_gpu_addr = ring->wptr_gpu_addr; 3727 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3728 lower_32_bits(wptr_gpu_addr)); 3729 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3730 upper_32_bits(wptr_gpu_addr)); 3731 3732 mdelay(1); 3733 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3734 3735 rb_addr = ring->gpu_addr >> 8; 3736 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 3737 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 3738 3739 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 3740 3741 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3742 mutex_unlock(&adev->srbm_mutex); 3743 3744 /* Init gfx ring 1 for pipe 1 */ 3745 if (adev->gfx.num_gfx_rings > 1) { 3746 mutex_lock(&adev->srbm_mutex); 3747 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1); 3748 /* maximum supported gfx ring is 2 */ 3749 ring = &adev->gfx.gfx_ring[1]; 3750 rb_bufsz = order_base_2(ring->ring_size / 8); 3751 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); 3752 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); 3753 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3754 /* Initialize the ring buffer's write pointers */ 3755 ring->wptr = 0; 3756 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); 3757 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); 3758 /* Set the wb address whether it's enabled or not */ 3759 rptr_addr = ring->rptr_gpu_addr; 3760 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); 3761 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3762 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3763 wptr_gpu_addr = ring->wptr_gpu_addr; 3764 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3765 lower_32_bits(wptr_gpu_addr)); 3766 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3767 upper_32_bits(wptr_gpu_addr)); 3768 3769 mdelay(1); 3770 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3771 3772 rb_addr = ring->gpu_addr >> 8; 3773 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr); 3774 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr)); 3775 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1); 3776 3777 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3778 mutex_unlock(&adev->srbm_mutex); 3779 } 3780 /* Switch to pipe 0 */ 3781 mutex_lock(&adev->srbm_mutex); 3782 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3783 mutex_unlock(&adev->srbm_mutex); 3784 3785 /* start the ring */ 3786 gfx_v11_0_cp_gfx_start(adev); 3787 3788 return 0; 3789 } 3790 3791 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 3792 { 3793 u32 data; 3794 3795 if (adev->gfx.rs64_enable) { 3796 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 3797 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 3798 enable ? 0 : 1); 3799 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 3800 enable ? 0 : 1); 3801 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 3802 enable ? 0 : 1); 3803 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 3804 enable ? 0 : 1); 3805 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 3806 enable ? 0 : 1); 3807 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 3808 enable ? 
1 : 0); 3809 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 3810 enable ? 1 : 0); 3811 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 3812 enable ? 1 : 0); 3813 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 3814 enable ? 1 : 0); 3815 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 3816 enable ? 0 : 1); 3817 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 3818 } else { 3819 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); 3820 3821 if (enable) { 3822 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); 3823 if (!adev->enable_mes_kiq) 3824 data = REG_SET_FIELD(data, CP_MEC_CNTL, 3825 MEC_ME2_HALT, 0); 3826 } else { 3827 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); 3828 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); 3829 } 3830 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); 3831 } 3832 3833 udelay(50); 3834 } 3835 3836 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) 3837 { 3838 const struct gfx_firmware_header_v1_0 *mec_hdr; 3839 const __le32 *fw_data; 3840 unsigned i, fw_size; 3841 u32 *fw = NULL; 3842 int r; 3843 3844 if (!adev->gfx.mec_fw) 3845 return -EINVAL; 3846 3847 gfx_v11_0_cp_compute_enable(adev, false); 3848 3849 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3850 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3851 3852 fw_data = (const __le32 *) 3853 (adev->gfx.mec_fw->data + 3854 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 3855 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 3856 3857 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 3858 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3859 &adev->gfx.mec.mec_fw_obj, 3860 &adev->gfx.mec.mec_fw_gpu_addr, 3861 (void **)&fw); 3862 if (r) { 3863 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); 3864 gfx_v11_0_mec_fini(adev); 3865 return r; 3866 } 3867 3868 memcpy(fw, fw_data, fw_size); 3869 3870 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3871 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3872 3873 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); 3874 3875 /* MEC1 */ 3876 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); 3877 3878 for (i = 0; i < mec_hdr->jt_size; i++) 3879 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, 3880 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 3881 3882 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 3883 3884 return 0; 3885 } 3886 3887 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 3888 { 3889 const struct gfx_firmware_header_v2_0 *mec_hdr; 3890 const __le32 *fw_ucode, *fw_data; 3891 u32 tmp, fw_ucode_size, fw_data_size; 3892 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 3893 u32 *fw_ucode_ptr, *fw_data_ptr; 3894 int r; 3895 3896 if (!adev->gfx.mec_fw) 3897 return -EINVAL; 3898 3899 gfx_v11_0_cp_compute_enable(adev, false); 3900 3901 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 3902 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3903 3904 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 3905 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 3906 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 3907 3908 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 3909 le32_to_cpu(mec_hdr->data_offset_bytes)); 3910 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 3911 3912 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3913 64 * 1024, 3914 AMDGPU_GEM_DOMAIN_VRAM | 3915 AMDGPU_GEM_DOMAIN_GTT, 3916 &adev->gfx.mec.mec_fw_obj, 3917 
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
	memcpy(fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	mutex_unlock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* Trigger an invalidation of the MEC RS64 data cache */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate
instruction cache\n"); 4013 return -EINVAL; 4014 } 4015 4016 return 0; 4017 } 4018 4019 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring) 4020 { 4021 uint32_t tmp; 4022 struct amdgpu_device *adev = ring->adev; 4023 4024 /* tell RLC which is KIQ queue */ 4025 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 4026 tmp &= 0xffffff00; 4027 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 4028 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80); 4029 } 4030 4031 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev) 4032 { 4033 /* set graphics engine doorbell range */ 4034 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 4035 (adev->doorbell_index.gfx_ring0 * 2) << 2); 4036 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 4037 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 4038 4039 /* set compute engine doorbell range */ 4040 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4041 (adev->doorbell_index.kiq * 2) << 2); 4042 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4043 (adev->doorbell_index.userqueue_end * 2) << 2); 4044 } 4045 4046 static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev, 4047 struct v11_gfx_mqd *mqd, 4048 struct amdgpu_mqd_prop *prop) 4049 { 4050 bool priority = 0; 4051 u32 tmp; 4052 4053 /* set up default queue priority level 4054 * 0x0 = low priority, 0x1 = high priority 4055 */ 4056 if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH) 4057 priority = 1; 4058 4059 tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT; 4060 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority); 4061 mqd->cp_gfx_hqd_queue_priority = tmp; 4062 } 4063 4064 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 4065 struct amdgpu_mqd_prop *prop) 4066 { 4067 struct v11_gfx_mqd *mqd = m; 4068 uint64_t hqd_gpu_addr, wb_gpu_addr; 4069 uint32_t tmp; 4070 uint32_t rb_bufsz; 4071 4072 /* set up gfx hqd wptr */ 4073 mqd->cp_gfx_hqd_wptr = 0; 4074 mqd->cp_gfx_hqd_wptr_hi = 0; 4075 4076 /* set the pointer to the MQD */ 4077 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 4078 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 4079 4080 /* set up mqd control */ 4081 tmp = regCP_GFX_MQD_CONTROL_DEFAULT; 4082 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 4083 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 4084 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 4085 mqd->cp_gfx_mqd_control = tmp; 4086 4087 /* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ 4088 tmp = regCP_GFX_HQD_VMID_DEFAULT; 4089 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 4090 mqd->cp_gfx_hqd_vmid = 0; 4091 4092 /* set up gfx queue priority */ 4093 gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop); 4094 4095 /* set up time quantum */ 4096 tmp = regCP_GFX_HQD_QUANTUM_DEFAULT; 4097 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 4098 mqd->cp_gfx_hqd_quantum = tmp; 4099 4100 /* set up gfx hqd base. 
this is similar as CP_RB_BASE */ 4101 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 4102 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 4103 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 4104 4105 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */ 4106 wb_gpu_addr = prop->rptr_gpu_addr; 4107 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 4108 mqd->cp_gfx_hqd_rptr_addr_hi = 4109 upper_32_bits(wb_gpu_addr) & 0xffff; 4110 4111 /* set up rb_wptr_poll addr */ 4112 wb_gpu_addr = prop->wptr_gpu_addr; 4113 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4114 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4115 4116 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ 4117 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 4118 tmp = regCP_GFX_HQD_CNTL_DEFAULT; 4119 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 4120 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 4121 #ifdef __BIG_ENDIAN 4122 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 4123 #endif 4124 if (prop->tmz_queue) 4125 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); 4126 mqd->cp_gfx_hqd_cntl = tmp; 4127 4128 /* set up cp_doorbell_control */ 4129 tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT; 4130 if (prop->use_doorbell) { 4131 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 4132 DOORBELL_OFFSET, prop->doorbell_index); 4133 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 4134 DOORBELL_EN, 1); 4135 } else 4136 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 4137 DOORBELL_EN, 0); 4138 mqd->cp_rb_doorbell_control = tmp; 4139 4140 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4141 mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT; 4142 4143 /* active the queue */ 4144 mqd->cp_gfx_hqd_active = 1; 4145 4146 /* set gfx UQ items */ 4147 mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr); 4148 mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr); 4149 mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr); 4150 mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr); 4151 mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr); 4152 mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr); 4153 mqd->fence_address_lo = lower_32_bits(prop->fence_address); 4154 mqd->fence_address_hi = upper_32_bits(prop->fence_address); 4155 4156 return 0; 4157 } 4158 4159 static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) 4160 { 4161 struct amdgpu_device *adev = ring->adev; 4162 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 4163 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 4164 4165 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 4166 memset((void *)mqd, 0, sizeof(*mqd)); 4167 mutex_lock(&adev->srbm_mutex); 4168 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4169 amdgpu_ring_init_mqd(ring); 4170 soc21_grbm_select(adev, 0, 0, 0, 0); 4171 mutex_unlock(&adev->srbm_mutex); 4172 if (adev->gfx.me.mqd_backup[mqd_idx]) 4173 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4174 } else { 4175 /* restore mqd with the backup copy */ 4176 if (adev->gfx.me.mqd_backup[mqd_idx]) 4177 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 4178 /* reset the ring */ 4179 ring->wptr = 0; 4180 *ring->wptr_cpu_addr = 0; 4181 amdgpu_ring_clear_ring(ring); 4182 } 4183 4184 return 0; 4185 } 4186 4187 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 4188 { 4189 int r, i; 4190 4191 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4192 r = 
gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 4193 if (r) 4194 return r; 4195 } 4196 4197 r = amdgpu_gfx_enable_kgq(adev, 0); 4198 if (r) 4199 return r; 4200 4201 return gfx_v11_0_cp_gfx_start(adev); 4202 } 4203 4204 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 4205 struct amdgpu_mqd_prop *prop) 4206 { 4207 struct v11_compute_mqd *mqd = m; 4208 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 4209 uint32_t tmp; 4210 4211 mqd->header = 0xC0310800; 4212 mqd->compute_pipelinestat_enable = 0x00000001; 4213 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 4214 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 4215 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 4216 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 4217 mqd->compute_misc_reserved = 0x00000007; 4218 4219 eop_base_addr = prop->eop_gpu_addr >> 8; 4220 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 4221 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 4222 4223 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4224 tmp = regCP_HQD_EOP_CONTROL_DEFAULT; 4225 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 4226 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1)); 4227 4228 mqd->cp_hqd_eop_control = tmp; 4229 4230 /* enable doorbell? */ 4231 tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT; 4232 4233 if (prop->use_doorbell) { 4234 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4235 DOORBELL_OFFSET, prop->doorbell_index); 4236 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4237 DOORBELL_EN, 1); 4238 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4239 DOORBELL_SOURCE, 0); 4240 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4241 DOORBELL_HIT, 0); 4242 } else { 4243 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4244 DOORBELL_EN, 0); 4245 } 4246 4247 mqd->cp_hqd_pq_doorbell_control = tmp; 4248 4249 /* disable the queue if it's active */ 4250 mqd->cp_hqd_dequeue_request = 0; 4251 mqd->cp_hqd_pq_rptr = 0; 4252 mqd->cp_hqd_pq_wptr_lo = 0; 4253 mqd->cp_hqd_pq_wptr_hi = 0; 4254 4255 /* set the pointer to the MQD */ 4256 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 4257 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 4258 4259 /* set MQD vmid to 0 */ 4260 tmp = regCP_MQD_CONTROL_DEFAULT; 4261 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 4262 mqd->cp_mqd_control = tmp; 4263 4264 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4265 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 4266 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 4267 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 4268 4269 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4270 tmp = regCP_HQD_PQ_CONTROL_DEFAULT; 4271 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 4272 (order_base_2(prop->queue_size / 4) - 1)); 4273 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 4274 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 4275 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); 4276 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 4277 prop->allow_tunneling); 4278 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 4279 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 4280 if (prop->tmz_queue) 4281 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); 4282 mqd->cp_hqd_pq_control = tmp; 4283 4284 /* set the wb address whether it's enabled or not */ 4285 wb_gpu_addr = prop->rptr_gpu_addr; 4286 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 4287 
mqd->cp_hqd_pq_rptr_report_addr_hi = 4288 upper_32_bits(wb_gpu_addr) & 0xffff; 4289 4290 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4291 wb_gpu_addr = prop->wptr_gpu_addr; 4292 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4293 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4294 4295 tmp = 0; 4296 /* enable the doorbell if requested */ 4297 if (prop->use_doorbell) { 4298 tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT; 4299 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4300 DOORBELL_OFFSET, prop->doorbell_index); 4301 4302 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4303 DOORBELL_EN, 1); 4304 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4305 DOORBELL_SOURCE, 0); 4306 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4307 DOORBELL_HIT, 0); 4308 } 4309 4310 mqd->cp_hqd_pq_doorbell_control = tmp; 4311 4312 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4313 mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT; 4314 4315 /* set the vmid for the queue */ 4316 mqd->cp_hqd_vmid = 0; 4317 4318 tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT; 4319 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 4320 mqd->cp_hqd_persistent_state = tmp; 4321 4322 /* set MIN_IB_AVAIL_SIZE */ 4323 tmp = regCP_HQD_IB_CONTROL_DEFAULT; 4324 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 4325 mqd->cp_hqd_ib_control = tmp; 4326 4327 /* set static priority for a compute queue/ring */ 4328 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 4329 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 4330 4331 mqd->cp_hqd_active = prop->hqd_active; 4332 4333 /* set UQ fenceaddress */ 4334 mqd->fence_address_lo = lower_32_bits(prop->fence_address); 4335 mqd->fence_address_hi = upper_32_bits(prop->fence_address); 4336 4337 return 0; 4338 } 4339 4340 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring) 4341 { 4342 struct amdgpu_device *adev = ring->adev; 4343 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4344 int j; 4345 4346 /* inactivate the queue */ 4347 if (amdgpu_sriov_vf(adev)) 4348 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 4349 4350 /* disable wptr polling */ 4351 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 4352 4353 /* write the EOP addr */ 4354 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 4355 mqd->cp_hqd_eop_base_addr_lo); 4356 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 4357 mqd->cp_hqd_eop_base_addr_hi); 4358 4359 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4360 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 4361 mqd->cp_hqd_eop_control); 4362 4363 /* enable doorbell? 
*/ 4364 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4365 mqd->cp_hqd_pq_doorbell_control); 4366 4367 /* disable the queue if it's active */ 4368 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 4369 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 4370 for (j = 0; j < adev->usec_timeout; j++) { 4371 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 4372 break; 4373 udelay(1); 4374 } 4375 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 4376 mqd->cp_hqd_dequeue_request); 4377 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 4378 mqd->cp_hqd_pq_rptr); 4379 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4380 mqd->cp_hqd_pq_wptr_lo); 4381 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4382 mqd->cp_hqd_pq_wptr_hi); 4383 } 4384 4385 /* set the pointer to the MQD */ 4386 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 4387 mqd->cp_mqd_base_addr_lo); 4388 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 4389 mqd->cp_mqd_base_addr_hi); 4390 4391 /* set MQD vmid to 0 */ 4392 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 4393 mqd->cp_mqd_control); 4394 4395 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4396 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 4397 mqd->cp_hqd_pq_base_lo); 4398 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 4399 mqd->cp_hqd_pq_base_hi); 4400 4401 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4402 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 4403 mqd->cp_hqd_pq_control); 4404 4405 /* set the wb address whether it's enabled or not */ 4406 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 4407 mqd->cp_hqd_pq_rptr_report_addr_lo); 4408 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 4409 mqd->cp_hqd_pq_rptr_report_addr_hi); 4410 4411 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4412 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 4413 mqd->cp_hqd_pq_wptr_poll_addr_lo); 4414 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 4415 mqd->cp_hqd_pq_wptr_poll_addr_hi); 4416 4417 /* enable the doorbell if requested */ 4418 if (ring->use_doorbell) { 4419 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4420 (adev->doorbell_index.kiq * 2) << 2); 4421 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4422 (adev->doorbell_index.userqueue_end * 2) << 2); 4423 } 4424 4425 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4426 mqd->cp_hqd_pq_doorbell_control); 4427 4428 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4429 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4430 mqd->cp_hqd_pq_wptr_lo); 4431 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4432 mqd->cp_hqd_pq_wptr_hi); 4433 4434 /* set the vmid for the queue */ 4435 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 4436 4437 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 4438 mqd->cp_hqd_persistent_state); 4439 4440 /* activate the queue */ 4441 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 4442 mqd->cp_hqd_active); 4443 4444 if (ring->use_doorbell) 4445 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 4446 4447 return 0; 4448 } 4449 4450 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) 4451 { 4452 struct amdgpu_device *adev = ring->adev; 4453 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4454 4455 gfx_v11_0_kiq_setting(ring); 4456 4457 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4458 /* reset MQD to a clean status */ 4459 if (adev->gfx.kiq[0].mqd_backup) 4460 memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 4461 4462 /* reset ring buffer */ 4463 ring->wptr = 0; 4464 amdgpu_ring_clear_ring(ring); 4465 4466 mutex_lock(&adev->srbm_mutex); 4467 
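		/* select this KIQ's me/pipe/queue before reprogramming its HQD registers */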
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4468 gfx_v11_0_kiq_init_register(ring); 4469 soc21_grbm_select(adev, 0, 0, 0, 0); 4470 mutex_unlock(&adev->srbm_mutex); 4471 } else { 4472 memset((void *)mqd, 0, sizeof(*mqd)); 4473 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 4474 amdgpu_ring_clear_ring(ring); 4475 mutex_lock(&adev->srbm_mutex); 4476 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4477 amdgpu_ring_init_mqd(ring); 4478 gfx_v11_0_kiq_init_register(ring); 4479 soc21_grbm_select(adev, 0, 0, 0, 0); 4480 mutex_unlock(&adev->srbm_mutex); 4481 4482 if (adev->gfx.kiq[0].mqd_backup) 4483 memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 4484 } 4485 4486 return 0; 4487 } 4488 4489 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) 4490 { 4491 struct amdgpu_device *adev = ring->adev; 4492 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4493 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 4494 4495 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) { 4496 memset((void *)mqd, 0, sizeof(*mqd)); 4497 mutex_lock(&adev->srbm_mutex); 4498 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4499 amdgpu_ring_init_mqd(ring); 4500 soc21_grbm_select(adev, 0, 0, 0, 0); 4501 mutex_unlock(&adev->srbm_mutex); 4502 4503 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4504 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4505 } else { 4506 /* restore MQD to a clean status */ 4507 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4508 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4509 /* reset ring buffer */ 4510 ring->wptr = 0; 4511 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 4512 amdgpu_ring_clear_ring(ring); 4513 } 4514 4515 return 0; 4516 } 4517 4518 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4519 { 4520 gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 4521 return 0; 4522 } 4523 4524 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4525 { 4526 int i, r; 4527 4528 if (!amdgpu_async_gfx_ring) 4529 gfx_v11_0_cp_compute_enable(adev, true); 4530 4531 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4532 r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 4533 if (r) 4534 return r; 4535 } 4536 4537 return amdgpu_gfx_enable_kcq(adev, 0); 4538 } 4539 4540 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) 4541 { 4542 int r, i; 4543 struct amdgpu_ring *ring; 4544 4545 if (!(adev->flags & AMD_IS_APU)) 4546 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4547 4548 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4549 /* legacy firmware loading */ 4550 r = gfx_v11_0_cp_gfx_load_microcode(adev); 4551 if (r) 4552 return r; 4553 4554 if (adev->gfx.rs64_enable) 4555 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev); 4556 else 4557 r = gfx_v11_0_cp_compute_load_microcode(adev); 4558 if (r) 4559 return r; 4560 } 4561 4562 gfx_v11_0_cp_set_doorbell_range(adev); 4563 4564 if (amdgpu_async_gfx_ring) { 4565 gfx_v11_0_cp_compute_enable(adev, true); 4566 gfx_v11_0_cp_gfx_enable(adev, true); 4567 } 4568 4569 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 4570 r = amdgpu_mes_kiq_hw_init(adev); 4571 else 4572 r = gfx_v11_0_kiq_resume(adev); 4573 if (r) 4574 return r; 4575 4576 r = gfx_v11_0_kcq_resume(adev); 4577 if (r) 4578 return r; 4579 4580 if (!amdgpu_async_gfx_ring) { 4581 r = gfx_v11_0_cp_gfx_resume(adev); 4582 if (r) 4583 return r; 4584 } else { 4585 r = gfx_v11_0_cp_async_gfx_ring_resume(adev); 4586 if (r) 4587 return r; 4588 } 4589 4590 if 
(adev->gfx.disable_kq) { 4591 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4592 ring = &adev->gfx.gfx_ring[i]; 4593 /* we don't want to set ring->ready */ 4594 r = amdgpu_ring_test_ring(ring); 4595 if (r) 4596 return r; 4597 } 4598 if (amdgpu_async_gfx_ring) 4599 amdgpu_gfx_disable_kgq(adev, 0); 4600 } else { 4601 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4602 ring = &adev->gfx.gfx_ring[i]; 4603 r = amdgpu_ring_test_helper(ring); 4604 if (r) 4605 return r; 4606 } 4607 } 4608 4609 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4610 ring = &adev->gfx.compute_ring[i]; 4611 r = amdgpu_ring_test_helper(ring); 4612 if (r) 4613 return r; 4614 } 4615 4616 return 0; 4617 } 4618 4619 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable) 4620 { 4621 gfx_v11_0_cp_gfx_enable(adev, enable); 4622 gfx_v11_0_cp_compute_enable(adev, enable); 4623 } 4624 4625 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) 4626 { 4627 int r; 4628 bool value; 4629 4630 r = adev->gfxhub.funcs->gart_enable(adev); 4631 if (r) 4632 return r; 4633 4634 amdgpu_device_flush_hdp(adev, NULL); 4635 4636 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 4637 false : true; 4638 4639 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 4640 /* TODO investigate why this and the hdp flush above is needed, 4641 * are we missing a flush somewhere else? */ 4642 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); 4643 4644 return 0; 4645 } 4646 4647 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) 4648 { 4649 u32 tmp; 4650 4651 /* select RS64 */ 4652 if (adev->gfx.rs64_enable) { 4653 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); 4654 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); 4655 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); 4656 4657 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); 4658 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); 4659 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); 4660 } 4661 4662 if (amdgpu_emu_mode == 1) 4663 msleep(100); 4664 } 4665 4666 static int get_gb_addr_config(struct amdgpu_device * adev) 4667 { 4668 u32 gb_addr_config; 4669 4670 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 4671 if (gb_addr_config == 0) 4672 return -EINVAL; 4673 4674 adev->gfx.config.gb_addr_config_fields.num_pkrs = 4675 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 4676 4677 adev->gfx.config.gb_addr_config = gb_addr_config; 4678 4679 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 4680 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4681 GB_ADDR_CONFIG, NUM_PIPES); 4682 4683 adev->gfx.config.max_tile_pipes = 4684 adev->gfx.config.gb_addr_config_fields.num_pipes; 4685 4686 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 4687 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4688 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 4689 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 4690 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4691 GB_ADDR_CONFIG, NUM_RB_PER_SE); 4692 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 4693 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4694 GB_ADDR_CONFIG, NUM_SHADER_ENGINES); 4695 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 4696 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4697 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 4698 4699 return 0; 4700 } 4701 4702 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) 4703 { 4704 uint32_t data; 4705 4706 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 4707 data |= 
CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);

	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}

static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
				       adev->gfx.cleaner_shader_ptr);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		if (adev->gfx.imu.funcs) {
			/* RLC autoload sequence 1: Program rlc ram */
			if (adev->gfx.imu.funcs->program_rlc_ram)
				adev->gfx.imu.funcs->program_rlc_ram(adev);
			/* rlc autoload firmware */
			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
			if (r)
				return r;
		}
	} else {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
				if (adev->gfx.imu.funcs->load_microcode)
					adev->gfx.imu.funcs->load_microcode(adev);
				if (adev->gfx.imu.funcs->setup_imu)
					adev->gfx.imu.funcs->setup_imu(adev);
				if (adev->gfx.imu.funcs->start_imu)
					adev->gfx.imu.funcs->start_imu(adev);
			}

			/* disable gpa mode in backdoor loading */
			gfx_v11_0_disable_gpa_mode(adev);
		}
	}

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
		if (r) {
			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
			return r;
		}
	}

	adev->gfx.is_poweron = true;

	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
	    adev->gfx.rs64_enable)
		gfx_v11_0_config_gfx_rs64(adev);

	r = gfx_v11_0_gfxhub_enable(adev);
	if (r)
		return r;

	if (!amdgpu_emu_mode)
		gfx_v11_0_init_golden_registers(adev);

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		/*
		 * For gfx 11, RLC firmware loading depends on the SMU firmware
		 * being loaded first, so for direct loading the SMC ucode has
		 * to be loaded here before the RLC.
4780 */ 4781 r = amdgpu_pm_load_smu_firmware(adev, NULL); 4782 if (r) 4783 return r; 4784 } 4785 4786 gfx_v11_0_constants_init(adev); 4787 4788 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 4789 gfx_v11_0_select_cp_fw_arch(adev); 4790 4791 if (adev->nbio.funcs->gc_doorbell_init) 4792 adev->nbio.funcs->gc_doorbell_init(adev); 4793 4794 r = gfx_v11_0_rlc_resume(adev); 4795 if (r) 4796 return r; 4797 4798 /* 4799 * init golden registers and rlc resume may override some registers, 4800 * reconfig them here 4801 */ 4802 gfx_v11_0_tcp_harvest(adev); 4803 4804 r = gfx_v11_0_cp_resume(adev); 4805 if (r) 4806 return r; 4807 4808 /* get IMU version from HW if it's not set */ 4809 if (!adev->gfx.imu_fw_version) 4810 adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0); 4811 4812 return r; 4813 } 4814 4815 static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev, 4816 bool enable) 4817 { 4818 unsigned int irq_type; 4819 int m, p, r; 4820 4821 if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) { 4822 for (m = 0; m < adev->gfx.me.num_me; m++) { 4823 for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) { 4824 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p; 4825 if (enable) 4826 r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, 4827 irq_type); 4828 else 4829 r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, 4830 irq_type); 4831 if (r) 4832 return r; 4833 } 4834 } 4835 } 4836 4837 if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) { 4838 for (m = 0; m < adev->gfx.mec.num_mec; ++m) { 4839 for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) { 4840 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 4841 + (m * adev->gfx.mec.num_pipe_per_mec) 4842 + p; 4843 if (enable) 4844 r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, 4845 irq_type); 4846 else 4847 r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, 4848 irq_type); 4849 if (r) 4850 return r; 4851 } 4852 } 4853 } 4854 4855 return 0; 4856 } 4857 4858 static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) 4859 { 4860 struct amdgpu_device *adev = ip_block->adev; 4861 4862 cancel_delayed_work_sync(&adev->gfx.idle_work); 4863 4864 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 4865 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 4866 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); 4867 gfx_v11_0_set_userq_eop_interrupts(adev, false); 4868 4869 if (!adev->no_hw_access) { 4870 if (amdgpu_async_gfx_ring && 4871 !adev->gfx.disable_kq) { 4872 if (amdgpu_gfx_disable_kgq(adev, 0)) 4873 DRM_ERROR("KGQ disable failed\n"); 4874 } 4875 4876 if (amdgpu_gfx_disable_kcq(adev, 0)) 4877 DRM_ERROR("KCQ disable failed\n"); 4878 4879 amdgpu_mes_kiq_hw_fini(adev); 4880 } 4881 4882 if (amdgpu_sriov_vf(adev)) 4883 /* Remove the steps disabling CPG and clearing KIQ position, 4884 * so that CP could perform IDLE-SAVE during switch. Those 4885 * steps are necessary to avoid a DMAR error in gfx9 but it is 4886 * not reproduced on gfx11. 
4887 */ 4888 return 0; 4889 4890 gfx_v11_0_cp_enable(adev, false); 4891 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4892 4893 adev->gfxhub.funcs->gart_disable(adev); 4894 4895 adev->gfx.is_poweron = false; 4896 4897 return 0; 4898 } 4899 4900 static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block) 4901 { 4902 return gfx_v11_0_hw_fini(ip_block); 4903 } 4904 4905 static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block) 4906 { 4907 return gfx_v11_0_hw_init(ip_block); 4908 } 4909 4910 static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block) 4911 { 4912 struct amdgpu_device *adev = ip_block->adev; 4913 4914 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 4915 GRBM_STATUS, GUI_ACTIVE)) 4916 return false; 4917 else 4918 return true; 4919 } 4920 4921 static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block) 4922 { 4923 unsigned i; 4924 u32 tmp; 4925 struct amdgpu_device *adev = ip_block->adev; 4926 4927 for (i = 0; i < adev->usec_timeout; i++) { 4928 /* read MC_STATUS */ 4929 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 4930 GRBM_STATUS__GUI_ACTIVE_MASK; 4931 4932 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 4933 return 0; 4934 udelay(1); 4935 } 4936 return -ETIMEDOUT; 4937 } 4938 4939 int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, 4940 bool req) 4941 { 4942 u32 i, tmp, val; 4943 4944 for (i = 0; i < adev->usec_timeout; i++) { 4945 /* Request with MeId=2, PipeId=0 */ 4946 tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req); 4947 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4); 4948 WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp); 4949 4950 val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX); 4951 if (req) { 4952 if (val == tmp) 4953 break; 4954 } else { 4955 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, 4956 REQUEST, 1); 4957 4958 /* unlocked or locked by firmware */ 4959 if (val != tmp) 4960 break; 4961 } 4962 udelay(1); 4963 } 4964 4965 if (i >= adev->usec_timeout) 4966 return -EINVAL; 4967 4968 return 0; 4969 } 4970 4971 static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) 4972 { 4973 u32 grbm_soft_reset = 0; 4974 u32 tmp; 4975 int r, i, j, k; 4976 struct amdgpu_device *adev = ip_block->adev; 4977 4978 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 4979 4980 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4981 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0); 4982 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0); 4983 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0); 4984 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0); 4985 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 4986 4987 mutex_lock(&adev->srbm_mutex); 4988 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 4989 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 4990 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 4991 soc21_grbm_select(adev, i, k, j, 0); 4992 4993 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 4994 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); 4995 } 4996 } 4997 } 4998 for (i = 0; i < adev->gfx.me.num_me; ++i) { 4999 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 5000 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 5001 soc21_grbm_select(adev, i, k, j, 0); 5002 5003 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); 5004 } 5005 } 5006 } 5007 soc21_grbm_select(adev, 0, 0, 0, 0); 5008 mutex_unlock(&adev->srbm_mutex); 5009 5010 /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ 5011 mutex_lock(&adev->gfx.reset_sem_mutex); 5012 r = 
gfx_v11_0_request_gfx_index_mutex(adev, true); 5013 if (r) { 5014 mutex_unlock(&adev->gfx.reset_sem_mutex); 5015 DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n"); 5016 return r; 5017 } 5018 5019 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe); 5020 5021 // Read CP_VMID_RESET register three times. 5022 // to get sufficient time for GFX_HQD_ACTIVE reach 0 5023 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 5024 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 5025 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 5026 5027 /* release the gfx mutex */ 5028 r = gfx_v11_0_request_gfx_index_mutex(adev, false); 5029 mutex_unlock(&adev->gfx.reset_sem_mutex); 5030 if (r) { 5031 DRM_ERROR("Failed to release the gfx mutex during soft reset\n"); 5032 return r; 5033 } 5034 5035 for (i = 0; i < adev->usec_timeout; i++) { 5036 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) && 5037 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE)) 5038 break; 5039 udelay(1); 5040 } 5041 if (i >= adev->usec_timeout) { 5042 printk("Failed to wait all pipes clean\n"); 5043 return -EINVAL; 5044 } 5045 5046 /********** trigger soft reset ***********/ 5047 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 5048 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5049 SOFT_RESET_CP, 1); 5050 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5051 SOFT_RESET_GFX, 1); 5052 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5053 SOFT_RESET_CPF, 1); 5054 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5055 SOFT_RESET_CPC, 1); 5056 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5057 SOFT_RESET_CPG, 1); 5058 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 5059 /********** exit soft reset ***********/ 5060 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 5061 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5062 SOFT_RESET_CP, 0); 5063 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5064 SOFT_RESET_GFX, 0); 5065 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5066 SOFT_RESET_CPF, 0); 5067 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5068 SOFT_RESET_CPC, 0); 5069 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 5070 SOFT_RESET_CPG, 0); 5071 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 5072 5073 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL); 5074 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1); 5075 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp); 5076 5077 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0); 5078 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0); 5079 5080 for (i = 0; i < adev->usec_timeout; i++) { 5081 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET)) 5082 break; 5083 udelay(1); 5084 } 5085 if (i >= adev->usec_timeout) { 5086 printk("Failed to wait CP_VMID_RESET to 0\n"); 5087 return -EINVAL; 5088 } 5089 5090 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5091 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 5092 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 5093 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 5094 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 5095 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 5096 5097 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5098 5099 return gfx_v11_0_cp_resume(adev); 5100 } 5101 5102 static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block) 5103 { 5104 int i, r; 5105 struct amdgpu_device *adev = ip_block->adev; 5106 
struct amdgpu_ring *ring; 5107 long tmo = msecs_to_jiffies(1000); 5108 5109 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 5110 ring = &adev->gfx.gfx_ring[i]; 5111 r = amdgpu_ring_test_ib(ring, tmo); 5112 if (r) 5113 return true; 5114 } 5115 5116 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 5117 ring = &adev->gfx.compute_ring[i]; 5118 r = amdgpu_ring_test_ib(ring, tmo); 5119 if (r) 5120 return true; 5121 } 5122 5123 return false; 5124 } 5125 5126 static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block) 5127 { 5128 struct amdgpu_device *adev = ip_block->adev; 5129 /** 5130 * GFX soft reset will impact MES, need resume MES when do GFX soft reset 5131 */ 5132 return amdgpu_mes_resume(adev); 5133 } 5134 5135 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) 5136 { 5137 uint64_t clock; 5138 uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; 5139 5140 if (amdgpu_sriov_vf(adev)) { 5141 amdgpu_gfx_off_ctrl(adev, false); 5142 mutex_lock(&adev->gfx.gpu_clock_mutex); 5143 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); 5144 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); 5145 clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); 5146 if (clock_counter_hi_pre != clock_counter_hi_after) 5147 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); 5148 mutex_unlock(&adev->gfx.gpu_clock_mutex); 5149 amdgpu_gfx_off_ctrl(adev, true); 5150 } else { 5151 preempt_disable(); 5152 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); 5153 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); 5154 clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); 5155 if (clock_counter_hi_pre != clock_counter_hi_after) 5156 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); 5157 preempt_enable(); 5158 } 5159 clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); 5160 5161 return clock; 5162 } 5163 5164 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 5165 uint32_t vmid, 5166 uint32_t gds_base, uint32_t gds_size, 5167 uint32_t gws_base, uint32_t gws_size, 5168 uint32_t oa_base, uint32_t oa_size) 5169 { 5170 struct amdgpu_device *adev = ring->adev; 5171 5172 /* GDS Base */ 5173 gfx_v11_0_write_data_to_reg(ring, 0, false, 5174 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, 5175 gds_base); 5176 5177 /* GDS Size */ 5178 gfx_v11_0_write_data_to_reg(ring, 0, false, 5179 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, 5180 gds_size); 5181 5182 /* GWS */ 5183 gfx_v11_0_write_data_to_reg(ring, 0, false, 5184 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, 5185 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 5186 5187 /* OA */ 5188 gfx_v11_0_write_data_to_reg(ring, 0, false, 5189 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, 5190 (1 << (oa_size + oa_base)) - (1 << oa_base)); 5191 } 5192 5193 static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block) 5194 { 5195 struct amdgpu_device *adev = ip_block->adev; 5196 5197 switch (amdgpu_user_queue) { 5198 case -1: 5199 case 0: 5200 default: 5201 adev->gfx.disable_kq = false; 5202 adev->gfx.disable_uq = true; 5203 break; 5204 case 1: 5205 adev->gfx.disable_kq = false; 5206 adev->gfx.disable_uq = false; 5207 break; 5208 case 2: 5209 adev->gfx.disable_kq = true; 5210 adev->gfx.disable_uq = false; 5211 break; 5212 } 5213 5214 adev->gfx.funcs = &gfx_v11_0_gfx_funcs; 5215 
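/* The ring counts configured below depend on whether kernel queues
 * were disabled by the amdgpu_user_queue setting above.
 */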
5216 if (adev->gfx.disable_kq) { 5217 /* We need one GFX ring temporarily to set up 5218 * the clear state. 5219 */ 5220 adev->gfx.num_gfx_rings = 1; 5221 adev->gfx.num_compute_rings = 0; 5222 } else { 5223 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; 5224 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 5225 AMDGPU_MAX_COMPUTE_RINGS); 5226 } 5227 5228 gfx_v11_0_set_kiq_pm4_funcs(adev); 5229 gfx_v11_0_set_ring_funcs(adev); 5230 gfx_v11_0_set_irq_funcs(adev); 5231 gfx_v11_0_set_gds_init(adev); 5232 gfx_v11_0_set_rlc_funcs(adev); 5233 gfx_v11_0_set_mqd_funcs(adev); 5234 gfx_v11_0_set_imu_funcs(adev); 5235 5236 gfx_v11_0_init_rlcg_reg_access_ctrl(adev); 5237 5238 return gfx_v11_0_init_microcode(adev); 5239 } 5240 5241 static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) 5242 { 5243 struct amdgpu_device *adev = ip_block->adev; 5244 int r; 5245 5246 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 5247 if (r) 5248 return r; 5249 5250 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 5251 if (r) 5252 return r; 5253 5254 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 5255 if (r) 5256 return r; 5257 5258 r = gfx_v11_0_set_userq_eop_interrupts(adev, true); 5259 if (r) 5260 return r; 5261 5262 return 0; 5263 } 5264 5265 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 5266 { 5267 uint32_t rlc_cntl; 5268 5269 /* if RLC is not enabled, do nothing */ 5270 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 5271 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 5272 } 5273 5274 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 5275 { 5276 uint32_t data; 5277 unsigned i; 5278 5279 data = RLC_SAFE_MODE__CMD_MASK; 5280 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 5281 5282 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 5283 5284 /* wait for RLC_SAFE_MODE */ 5285 for (i = 0; i < adev->usec_timeout; i++) { 5286 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 5287 RLC_SAFE_MODE, CMD)) 5288 break; 5289 udelay(1); 5290 } 5291 } 5292 5293 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) 5294 { 5295 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 5296 } 5297 5298 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 5299 bool enable) 5300 { 5301 uint32_t def, data; 5302 5303 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 5304 return; 5305 5306 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5307 5308 if (enable) 5309 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5310 else 5311 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5312 5313 if (def != data) 5314 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5315 } 5316 5317 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 5318 bool enable) 5319 { 5320 uint32_t def, data; 5321 5322 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 5323 return; 5324 5325 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5326 5327 if (enable) 5328 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5329 else 5330 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5331 5332 if (def != data) 5333 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5334 } 5335 5336 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 5337 bool enable) 5338 { 5339 uint32_t def, data; 5340 5341 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 5342 return; 5343 5344 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5345 
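/* Repeater FGCG is enabled by clearing the override bit and
 * disabled by setting it.
 */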
5346 if (enable) 5347 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5348 else 5349 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5350 5351 if (def != data) 5352 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5353 } 5354 5355 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 5356 bool enable) 5357 { 5358 uint32_t data, def; 5359 5360 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 5361 return; 5362 5363 /* It is disabled by HW by default */ 5364 if (enable) { 5365 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5366 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 5367 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5368 5369 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5370 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5371 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5372 5373 if (def != data) 5374 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5375 } 5376 } else { 5377 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5378 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5379 5380 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5381 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5382 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5383 5384 if (def != data) 5385 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5386 } 5387 } 5388 } 5389 5390 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 5391 bool enable) 5392 { 5393 uint32_t def, data; 5394 5395 if (!(adev->cg_flags & 5396 (AMD_CG_SUPPORT_GFX_CGCG | 5397 AMD_CG_SUPPORT_GFX_CGLS | 5398 AMD_CG_SUPPORT_GFX_3D_CGCG | 5399 AMD_CG_SUPPORT_GFX_3D_CGLS))) 5400 return; 5401 5402 if (enable) { 5403 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5404 5405 /* unset CGCG override */ 5406 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5407 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 5408 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5409 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 5410 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 5411 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5412 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 5413 5414 /* update CGCG override bits */ 5415 if (def != data) 5416 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5417 5418 /* enable cgcg FSM(0x0000363F) */ 5419 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5420 5421 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 5422 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 5423 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5424 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5425 } 5426 5427 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 5428 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 5429 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5430 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5431 } 5432 5433 if (def != data) 5434 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5435 5436 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5437 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5438 5439 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 5440 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 5441 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5442 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5443 } 5444 5445 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 5446 data &= 
~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 5447 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5448 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5449 } 5450 5451 if (def != data) 5452 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5453 5454 /* set IDLE_POLL_COUNT(0x00900100) */ 5455 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 5456 5457 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 5458 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 5459 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 5460 5461 if (def != data) 5462 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 5463 5464 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5465 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 5466 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 5467 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 5468 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 5469 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 5470 5471 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5472 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5473 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5474 5475 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 5476 if (adev->sdma.num_instances > 1) { 5477 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5478 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5479 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5480 } 5481 } else { 5482 /* Program RLC_CGCG_CGLS_CTRL */ 5483 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5484 5485 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5486 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5487 5488 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5489 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5490 5491 if (def != data) 5492 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5493 5494 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5495 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5496 5497 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 5498 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5499 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5500 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5501 5502 if (def != data) 5503 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5504 5505 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5506 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5507 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5508 5509 /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ 5510 if (adev->sdma.num_instances > 1) { 5511 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5512 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5513 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5514 } 5515 } 5516 } 5517 5518 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, 5519 bool enable) 5520 { 5521 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 5522 5523 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); 5524 5525 gfx_v11_0_update_medium_grain_clock_gating(adev, enable); 5526 5527 gfx_v11_0_update_repeater_fgcg(adev, enable); 5528 5529 gfx_v11_0_update_sram_fgcg(adev, enable); 5530 5531 gfx_v11_0_update_perf_clk(adev, enable); 5532 5533 if (adev->cg_flags & 5534 (AMD_CG_SUPPORT_GFX_MGCG | 5535 AMD_CG_SUPPORT_GFX_CGLS | 5536 AMD_CG_SUPPORT_GFX_CGCG | 5537 AMD_CG_SUPPORT_GFX_3D_CGCG | 5538 AMD_CG_SUPPORT_GFX_3D_CGLS)) 
5539 gfx_v11_0_enable_gui_idle_interrupt(adev, enable); 5540 5541 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5542 5543 return 0; 5544 } 5545 5546 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) 5547 { 5548 u32 reg, pre_data, data; 5549 5550 amdgpu_gfx_off_ctrl(adev, false); 5551 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 5552 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) 5553 pre_data = RREG32_NO_KIQ(reg); 5554 else 5555 pre_data = RREG32(reg); 5556 5557 data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK); 5558 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 5559 5560 if (pre_data != data) { 5561 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) { 5562 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 5563 } else 5564 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 5565 } 5566 amdgpu_gfx_off_ctrl(adev, true); 5567 5568 if (ring 5569 && amdgpu_sriov_is_pp_one_vf(adev) 5570 && (pre_data != data) 5571 && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) 5572 || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { 5573 amdgpu_ring_emit_wreg(ring, reg, data); 5574 } 5575 } 5576 5577 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { 5578 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled, 5579 .set_safe_mode = gfx_v11_0_set_safe_mode, 5580 .unset_safe_mode = gfx_v11_0_unset_safe_mode, 5581 .init = gfx_v11_0_rlc_init, 5582 .get_csb_size = gfx_v11_0_get_csb_size, 5583 .get_csb_buffer = gfx_v11_0_get_csb_buffer, 5584 .resume = gfx_v11_0_rlc_resume, 5585 .stop = gfx_v11_0_rlc_stop, 5586 .reset = gfx_v11_0_rlc_reset, 5587 .start = gfx_v11_0_rlc_start, 5588 .update_spm_vmid = gfx_v11_0_update_spm_vmid, 5589 }; 5590 5591 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) 5592 { 5593 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 5594 5595 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) 5596 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5597 else 5598 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5599 5600 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data); 5601 5602 // Program RLC_PG_DELAY3 for CGPG hysteresis 5603 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 5604 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5605 case IP_VERSION(11, 0, 1): 5606 case IP_VERSION(11, 0, 4): 5607 case IP_VERSION(11, 5, 0): 5608 case IP_VERSION(11, 5, 1): 5609 case IP_VERSION(11, 5, 2): 5610 case IP_VERSION(11, 5, 3): 5611 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); 5612 break; 5613 default: 5614 break; 5615 } 5616 } 5617 } 5618 5619 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable) 5620 { 5621 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 5622 5623 gfx_v11_cntl_power_gating(adev, enable); 5624 5625 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 5626 } 5627 5628 static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block, 5629 enum amd_powergating_state state) 5630 { 5631 struct amdgpu_device *adev = ip_block->adev; 5632 bool enable = (state == AMD_PG_STATE_GATE); 5633 5634 if (amdgpu_sriov_vf(adev)) 5635 return 0; 5636 5637 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5638 case IP_VERSION(11, 0, 0): 5639 case IP_VERSION(11, 0, 2): 5640 case IP_VERSION(11, 0, 3): 5641 amdgpu_gfx_off_ctrl(adev, enable); 5642 break; 5643 case IP_VERSION(11, 0, 1): 5644 case IP_VERSION(11, 0, 4): 5645 case IP_VERSION(11, 5, 0): 5646 case IP_VERSION(11, 5, 1): 5647 case IP_VERSION(11, 5, 
2): 5648 case IP_VERSION(11, 5, 3): 5649 if (!enable) 5650 amdgpu_gfx_off_ctrl(adev, false); 5651 5652 gfx_v11_cntl_pg(adev, enable); 5653 5654 if (enable) 5655 amdgpu_gfx_off_ctrl(adev, true); 5656 5657 break; 5658 default: 5659 break; 5660 } 5661 5662 return 0; 5663 } 5664 5665 static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, 5666 enum amd_clockgating_state state) 5667 { 5668 struct amdgpu_device *adev = ip_block->adev; 5669 5670 if (amdgpu_sriov_vf(adev)) 5671 return 0; 5672 5673 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5674 case IP_VERSION(11, 0, 0): 5675 case IP_VERSION(11, 0, 1): 5676 case IP_VERSION(11, 0, 2): 5677 case IP_VERSION(11, 0, 3): 5678 case IP_VERSION(11, 0, 4): 5679 case IP_VERSION(11, 5, 0): 5680 case IP_VERSION(11, 5, 1): 5681 case IP_VERSION(11, 5, 2): 5682 case IP_VERSION(11, 5, 3): 5683 gfx_v11_0_update_gfx_clock_gating(adev, 5684 state == AMD_CG_STATE_GATE); 5685 break; 5686 default: 5687 break; 5688 } 5689 5690 return 0; 5691 } 5692 5693 static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) 5694 { 5695 struct amdgpu_device *adev = ip_block->adev; 5696 int data; 5697 5698 /* AMD_CG_SUPPORT_GFX_MGCG */ 5699 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5700 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5701 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5702 5703 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5704 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5705 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5706 5707 /* AMD_CG_SUPPORT_GFX_FGCG */ 5708 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5709 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5710 5711 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5712 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5713 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5714 5715 /* AMD_CG_SUPPORT_GFX_CGCG */ 5716 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5717 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5718 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5719 5720 /* AMD_CG_SUPPORT_GFX_CGLS */ 5721 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5722 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5723 5724 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5725 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5726 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5727 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5728 5729 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5730 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5731 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 5732 } 5733 5734 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 5735 { 5736 /* gfx11 is 32bit rptr*/ 5737 return *(uint32_t *)ring->rptr_cpu_addr; 5738 } 5739 5740 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 5741 { 5742 struct amdgpu_device *adev = ring->adev; 5743 u64 wptr; 5744 5745 /* XXX check if swapping is necessary on BE */ 5746 if (ring->use_doorbell) { 5747 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5748 } else { 5749 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 5750 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 5751 } 5752 5753 return wptr; 5754 } 5755 5756 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 5757 { 5758 struct amdgpu_device *adev = ring->adev; 5759 5760 if (ring->use_doorbell) { 5761 /* XXX check if swapping is necessary on BE */ 5762 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5763 ring->wptr); 5764 WDOORBELL64(ring->doorbell_index, ring->wptr); 5765 } else { 5766 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 5767 
lower_32_bits(ring->wptr)); 5768 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 5769 upper_32_bits(ring->wptr)); 5770 } 5771 } 5772 5773 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 5774 { 5775 /* gfx11 hardware is 32bit rptr */ 5776 return *(uint32_t *)ring->rptr_cpu_addr; 5777 } 5778 5779 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 5780 { 5781 u64 wptr; 5782 5783 /* XXX check if swapping is necessary on BE */ 5784 if (ring->use_doorbell) 5785 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5786 else 5787 BUG(); 5788 return wptr; 5789 } 5790 5791 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 5792 { 5793 struct amdgpu_device *adev = ring->adev; 5794 5795 /* XXX check if swapping is necessary on BE */ 5796 if (ring->use_doorbell) { 5797 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5798 ring->wptr); 5799 WDOORBELL64(ring->doorbell_index, ring->wptr); 5800 } else { 5801 BUG(); /* only DOORBELL method supported on gfx11 now */ 5802 } 5803 } 5804 5805 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 5806 { 5807 struct amdgpu_device *adev = ring->adev; 5808 u32 ref_and_mask, reg_mem_engine; 5809 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 5810 5811 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 5812 switch (ring->me) { 5813 case 1: 5814 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 5815 break; 5816 case 2: 5817 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 5818 break; 5819 default: 5820 return; 5821 } 5822 reg_mem_engine = 0; 5823 } else { 5824 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe; 5825 reg_mem_engine = 1; /* pfp */ 5826 } 5827 5828 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 5829 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 5830 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 5831 ref_and_mask, ref_and_mask, 0x20); 5832 } 5833 5834 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 5835 struct amdgpu_job *job, 5836 struct amdgpu_ib *ib, 5837 uint32_t flags) 5838 { 5839 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5840 u32 header, control = 0; 5841 5842 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 5843 5844 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5845 5846 control |= ib->length_dw | (vmid << 24); 5847 5848 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 5849 control |= INDIRECT_BUFFER_PRE_ENB(1); 5850 5851 if (flags & AMDGPU_IB_PREEMPTED) 5852 control |= INDIRECT_BUFFER_PRE_RESUME(1); 5853 5854 if (vmid) 5855 gfx_v11_0_ring_emit_de_meta(ring, 5856 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); 5857 } 5858 5859 amdgpu_ring_write(ring, header); 5860 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5861 amdgpu_ring_write(ring, 5862 #ifdef __BIG_ENDIAN 5863 (2 << 0) | 5864 #endif 5865 lower_32_bits(ib->gpu_addr)); 5866 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5867 amdgpu_ring_write(ring, control); 5868 } 5869 5870 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 5871 struct amdgpu_job *job, 5872 struct amdgpu_ib *ib, 5873 uint32_t flags) 5874 { 5875 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5876 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 5877 5878 /* Currently, there is a high possibility to get wave ID mismatch 5879 * between ME and GDS, leading to a hw deadlock, because ME generates 5880 * different wave IDs than the GDS expects. 
This situation happens 5881 * randomly when at least 5 compute pipes use GDS ordered append. 5882 * The wave IDs generated by ME are also wrong after suspend/resume. 5883 * Those are probably bugs somewhere else in the kernel driver. 5884 * 5885 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 5886 * GDS to 0 for this ring (me/pipe). 5887 */ 5888 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 5889 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 5890 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 5891 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 5892 } 5893 5894 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 5895 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5896 amdgpu_ring_write(ring, 5897 #ifdef __BIG_ENDIAN 5898 (2 << 0) | 5899 #endif 5900 lower_32_bits(ib->gpu_addr)); 5901 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5902 amdgpu_ring_write(ring, control); 5903 } 5904 5905 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 5906 u64 seq, unsigned flags) 5907 { 5908 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 5909 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 5910 5911 /* RELEASE_MEM - flush caches, send int */ 5912 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 5913 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 5914 PACKET3_RELEASE_MEM_GCR_GL2_WB | 5915 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */ 5916 PACKET3_RELEASE_MEM_GCR_GLM_WB | 5917 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 5918 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 5919 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 5920 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 5921 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 
2 : 0))); 5922 5923 /* 5924 * the address should be Qword aligned for a 64bit write, and Dword 5925 * aligned if only the low 32bit data is sent (data high is discarded) 5926 */ 5927 if (write64bit) 5928 BUG_ON(addr & 0x7); 5929 else 5930 BUG_ON(addr & 0x3); 5931 amdgpu_ring_write(ring, lower_32_bits(addr)); 5932 amdgpu_ring_write(ring, upper_32_bits(addr)); 5933 amdgpu_ring_write(ring, lower_32_bits(seq)); 5934 amdgpu_ring_write(ring, upper_32_bits(seq)); 5935 amdgpu_ring_write(ring, 0); 5936 } 5937 5938 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 5939 { 5940 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 5941 uint32_t seq = ring->fence_drv.sync_seq; 5942 uint64_t addr = ring->fence_drv.gpu_addr; 5943 5944 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 5945 upper_32_bits(addr), seq, 0xffffffff, 4); 5946 } 5947 5948 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 5949 uint16_t pasid, uint32_t flush_type, 5950 bool all_hub, uint8_t dst_sel) 5951 { 5952 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 5953 amdgpu_ring_write(ring, 5954 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 5955 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 5956 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 5957 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 5958 } 5959 5960 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 5961 unsigned vmid, uint64_t pd_addr) 5962 { 5963 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 5964 5965 /* compute doesn't have PFP */ 5966 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 5967 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5968 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5969 amdgpu_ring_write(ring, 0x0); 5970 } 5971 5972 /* Make sure that we can't skip the SET_Q_MODE packets when the VM 5973 * has changed in any way.
5974 */ 5975 ring->set_q_mode_offs = 0; 5976 ring->set_q_mode_ptr = NULL; 5977 } 5978 5979 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 5980 u64 seq, unsigned int flags) 5981 { 5982 struct amdgpu_device *adev = ring->adev; 5983 5984 /* we only allocate 32bit for each seq wb address */ 5985 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 5986 5987 /* write fence seq to the "addr" */ 5988 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5989 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5990 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 5991 amdgpu_ring_write(ring, lower_32_bits(addr)); 5992 amdgpu_ring_write(ring, upper_32_bits(addr)); 5993 amdgpu_ring_write(ring, lower_32_bits(seq)); 5994 5995 if (flags & AMDGPU_FENCE_FLAG_INT) { 5996 /* set register to trigger INT */ 5997 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5998 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5999 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 6000 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 6001 amdgpu_ring_write(ring, 0); 6002 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 6003 } 6004 } 6005 6006 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 6007 uint32_t flags) 6008 { 6009 uint32_t dw2 = 0; 6010 6011 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */ 6012 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 6013 /* set load_global_config & load_global_uconfig */ 6014 dw2 |= 0x8001; 6015 /* set load_cs_sh_regs */ 6016 dw2 |= 0x01000000; 6017 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 6018 dw2 |= 0x10002; 6019 } 6020 6021 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 6022 amdgpu_ring_write(ring, dw2); 6023 amdgpu_ring_write(ring, 0); 6024 } 6025 6026 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring, 6027 uint64_t addr) 6028 { 6029 unsigned ret; 6030 6031 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 6032 amdgpu_ring_write(ring, lower_32_bits(addr)); 6033 amdgpu_ring_write(ring, upper_32_bits(addr)); 6034 /* discard following DWs if *cond_exec_gpu_addr==0 */ 6035 amdgpu_ring_write(ring, 0); 6036 ret = ring->wptr & ring->buf_mask; 6037 /* patch dummy value later */ 6038 amdgpu_ring_write(ring, 0); 6039 6040 return ret; 6041 } 6042 6043 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring, 6044 u64 shadow_va, u64 csa_va, 6045 u64 gds_va, bool init_shadow, 6046 int vmid) 6047 { 6048 struct amdgpu_device *adev = ring->adev; 6049 unsigned int offs, end; 6050 6051 if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj) 6052 return; 6053 6054 /* 6055 * The logic here isn't easy to understand because we need to keep state 6056 * across multiple executions of the function as well as between the 6057 * CPU and GPU. The general idea is that the newly written GPU command 6058 * has a condition on the previous one and is only executed if really 6059 * necessary. 6060 */ 6061 6062 /* 6063 * The dw in the NOP controls whether the next SET_Q_MODE packet should be 6064 * executed or not. Reserve 64 bits just to be on the safe side. 6065 */ 6066 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1)); 6067 offs = ring->wptr & ring->buf_mask; 6068 6069 /* 6070 * We start with skipping the prefix SET_Q_MODE and always executing 6071 * the postfix SET_Q_MODE packet. This is changed below with a 6072 * WRITE_DATA command when the postfix executes. 6073 */ 6074 amdgpu_ring_write(ring, shadow_va ?
1 : 0); 6075 amdgpu_ring_write(ring, 0); 6076 6077 if (ring->set_q_mode_offs) { 6078 uint64_t addr; 6079 6080 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 6081 addr += ring->set_q_mode_offs << 2; 6082 end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr); 6083 } 6084 6085 /* 6086 * When the postfix SET_Q_MODE packet executes we need to make sure that the 6087 * next prefix SET_Q_MODE packet executes as well. 6088 */ 6089 if (!shadow_va) { 6090 uint64_t addr; 6091 6092 addr = amdgpu_bo_gpu_offset(ring->ring_obj); 6093 addr += offs << 2; 6094 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6095 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); 6096 amdgpu_ring_write(ring, lower_32_bits(addr)); 6097 amdgpu_ring_write(ring, upper_32_bits(addr)); 6098 amdgpu_ring_write(ring, 0x1); 6099 } 6100 6101 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7)); 6102 amdgpu_ring_write(ring, lower_32_bits(shadow_va)); 6103 amdgpu_ring_write(ring, upper_32_bits(shadow_va)); 6104 amdgpu_ring_write(ring, lower_32_bits(gds_va)); 6105 amdgpu_ring_write(ring, upper_32_bits(gds_va)); 6106 amdgpu_ring_write(ring, lower_32_bits(csa_va)); 6107 amdgpu_ring_write(ring, upper_32_bits(csa_va)); 6108 amdgpu_ring_write(ring, shadow_va ? 6109 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0); 6110 amdgpu_ring_write(ring, init_shadow ? 6111 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0); 6112 6113 if (ring->set_q_mode_offs) 6114 amdgpu_ring_patch_cond_exec(ring, end); 6115 6116 if (shadow_va) { 6117 uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid; 6118 6119 /* 6120 * If the tokens match try to skip the last postfix SET_Q_MODE 6121 * packet to avoid saving/restoring the state all the time. 6122 */ 6123 if (ring->set_q_mode_ptr && ring->set_q_mode_token == token) 6124 *ring->set_q_mode_ptr = 0; 6125 6126 ring->set_q_mode_token = token; 6127 } else { 6128 ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs]; 6129 } 6130 6131 ring->set_q_mode_offs = offs; 6132 } 6133 6134 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) 6135 { 6136 int i, r = 0; 6137 struct amdgpu_device *adev = ring->adev; 6138 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 6139 struct amdgpu_ring *kiq_ring = &kiq->ring; 6140 unsigned long flags; 6141 6142 if (adev->enable_mes) 6143 return -EINVAL; 6144 6145 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 6146 return -EINVAL; 6147 6148 spin_lock_irqsave(&kiq->ring_lock, flags); 6149 6150 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 6151 spin_unlock_irqrestore(&kiq->ring_lock, flags); 6152 return -ENOMEM; 6153 } 6154 6155 /* assert preemption condition */ 6156 amdgpu_ring_set_preempt_cond_exec(ring, false); 6157 6158 /* assert IB preemption, emit the trailing fence */ 6159 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 6160 ring->trail_fence_gpu_addr, 6161 ++ring->trail_seq); 6162 amdgpu_ring_commit(kiq_ring); 6163 6164 spin_unlock_irqrestore(&kiq->ring_lock, flags); 6165 6166 /* poll the trailing fence */ 6167 for (i = 0; i < adev->usec_timeout; i++) { 6168 if (ring->trail_seq == 6169 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 6170 break; 6171 udelay(1); 6172 } 6173 6174 if (i >= adev->usec_timeout) { 6175 r = -EINVAL; 6176 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 6177 } 6178 6179 /* deassert preemption condition */ 6180 amdgpu_ring_set_preempt_cond_exec(ring, true); 6181 return r; 6182 } 6183 6184 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 6185 { 6186 struct 
amdgpu_device *adev = ring->adev; 6187 struct v10_de_ib_state de_payload = {0}; 6188 uint64_t offset, gds_addr, de_payload_gpu_addr; 6189 void *de_payload_cpu_addr; 6190 int cnt; 6191 6192 offset = offsetof(struct v10_gfx_meta_data, de_payload); 6193 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; 6194 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; 6195 6196 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + 6197 AMDGPU_CSA_SIZE - adev->gds.gds_size, 6198 PAGE_SIZE); 6199 6200 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 6201 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 6202 6203 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 6204 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 6205 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 6206 WRITE_DATA_DST_SEL(8) | 6207 WR_CONFIRM) | 6208 WRITE_DATA_CACHE_POLICY(0)); 6209 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); 6210 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); 6211 6212 if (resume) 6213 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, 6214 sizeof(de_payload) >> 2); 6215 else 6216 amdgpu_ring_write_multiple(ring, (void *)&de_payload, 6217 sizeof(de_payload) >> 2); 6218 } 6219 6220 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, 6221 bool secure) 6222 { 6223 uint32_t v = secure ? FRAME_TMZ : 0; 6224 6225 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 6226 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 6227 } 6228 6229 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 6230 uint32_t reg_val_offs) 6231 { 6232 struct amdgpu_device *adev = ring->adev; 6233 6234 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 6235 amdgpu_ring_write(ring, 0 | /* src: register*/ 6236 (5 << 8) | /* dst: memory */ 6237 (1 << 20)); /* write confirm */ 6238 amdgpu_ring_write(ring, reg); 6239 amdgpu_ring_write(ring, 0); 6240 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 6241 reg_val_offs * 4)); 6242 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 6243 reg_val_offs * 4)); 6244 } 6245 6246 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 6247 uint32_t val) 6248 { 6249 uint32_t cmd = 0; 6250 6251 switch (ring->funcs->type) { 6252 case AMDGPU_RING_TYPE_GFX: 6253 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 6254 break; 6255 case AMDGPU_RING_TYPE_KIQ: 6256 cmd = (1 << 16); /* no inc addr */ 6257 break; 6258 default: 6259 cmd = WR_CONFIRM; 6260 break; 6261 } 6262 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6263 amdgpu_ring_write(ring, cmd); 6264 amdgpu_ring_write(ring, reg); 6265 amdgpu_ring_write(ring, 0); 6266 amdgpu_ring_write(ring, val); 6267 } 6268 6269 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 6270 uint32_t val, uint32_t mask) 6271 { 6272 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 6273 } 6274 6275 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 6276 uint32_t reg0, uint32_t reg1, 6277 uint32_t ref, uint32_t mask) 6278 { 6279 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 6280 6281 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 6282 ref, mask, 0x20); 6283 } 6284 6285 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, 6286 unsigned vmid) 6287 { 6288 struct amdgpu_device *adev = ring->adev; 6289 uint32_t value = 0; 6290 6291 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 6292 value = REG_SET_FIELD(value, SQ_CMD, MODE, 
0x01); 6293 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 6294 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 6295 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 6296 WREG32_SOC15(GC, 0, regSQ_CMD, value); 6297 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 6298 } 6299 6300 static void 6301 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 6302 uint32_t me, uint32_t pipe, 6303 enum amdgpu_interrupt_state state) 6304 { 6305 uint32_t cp_int_cntl, cp_int_cntl_reg; 6306 6307 if (!me) { 6308 switch (pipe) { 6309 case 0: 6310 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 6311 break; 6312 case 1: 6313 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 6314 break; 6315 default: 6316 DRM_DEBUG("invalid pipe %d\n", pipe); 6317 return; 6318 } 6319 } else { 6320 DRM_DEBUG("invalid me %d\n", me); 6321 return; 6322 } 6323 6324 switch (state) { 6325 case AMDGPU_IRQ_STATE_DISABLE: 6326 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6327 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6328 TIME_STAMP_INT_ENABLE, 0); 6329 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6330 GENERIC0_INT_ENABLE, 0); 6331 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6332 break; 6333 case AMDGPU_IRQ_STATE_ENABLE: 6334 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6335 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6336 TIME_STAMP_INT_ENABLE, 1); 6337 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6338 GENERIC0_INT_ENABLE, 1); 6339 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6340 break; 6341 default: 6342 break; 6343 } 6344 } 6345 6346 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 6347 int me, int pipe, 6348 enum amdgpu_interrupt_state state) 6349 { 6350 u32 mec_int_cntl, mec_int_cntl_reg; 6351 6352 /* 6353 * amdgpu controls only the first MEC. That's why this function only 6354 * handles the setting of interrupts for this specific MEC. All other 6355 * pipes' interrupts are set by amdkfd. 
6356 */ 6357 6358 if (me == 1) { 6359 switch (pipe) { 6360 case 0: 6361 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6362 break; 6363 case 1: 6364 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 6365 break; 6366 case 2: 6367 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 6368 break; 6369 case 3: 6370 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 6371 break; 6372 default: 6373 DRM_DEBUG("invalid pipe %d\n", pipe); 6374 return; 6375 } 6376 } else { 6377 DRM_DEBUG("invalid me %d\n", me); 6378 return; 6379 } 6380 6381 switch (state) { 6382 case AMDGPU_IRQ_STATE_DISABLE: 6383 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6384 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6385 TIME_STAMP_INT_ENABLE, 0); 6386 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6387 GENERIC0_INT_ENABLE, 0); 6388 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6389 break; 6390 case AMDGPU_IRQ_STATE_ENABLE: 6391 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6392 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6393 TIME_STAMP_INT_ENABLE, 1); 6394 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6395 GENERIC0_INT_ENABLE, 1); 6396 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6397 break; 6398 default: 6399 break; 6400 } 6401 } 6402 6403 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, 6404 struct amdgpu_irq_src *src, 6405 unsigned type, 6406 enum amdgpu_interrupt_state state) 6407 { 6408 switch (type) { 6409 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 6410 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 6411 break; 6412 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 6413 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 6414 break; 6415 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 6416 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 6417 break; 6418 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 6419 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 6420 break; 6421 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 6422 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 6423 break; 6424 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 6425 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 6426 break; 6427 default: 6428 break; 6429 } 6430 return 0; 6431 } 6432 6433 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, 6434 struct amdgpu_irq_src *source, 6435 struct amdgpu_iv_entry *entry) 6436 { 6437 u32 doorbell_offset = entry->src_data[0]; 6438 u8 me_id, pipe_id, queue_id; 6439 struct amdgpu_ring *ring; 6440 int i; 6441 6442 DRM_DEBUG("IH: CP EOP\n"); 6443 6444 if (adev->enable_mes && doorbell_offset) { 6445 struct amdgpu_userq_fence_driver *fence_drv = NULL; 6446 struct xarray *xa = &adev->userq_xa; 6447 unsigned long flags; 6448 6449 xa_lock_irqsave(xa, flags); 6450 fence_drv = xa_load(xa, doorbell_offset); 6451 if (fence_drv) 6452 amdgpu_userq_fence_driver_process(fence_drv); 6453 xa_unlock_irqrestore(xa, flags); 6454 } else { 6455 me_id = (entry->ring_id & 0x0c) >> 2; 6456 pipe_id = (entry->ring_id & 0x03) >> 0; 6457 queue_id = (entry->ring_id & 0x70) >> 4; 6458 6459 switch (me_id) { 6460 case 0: 6461 if (pipe_id == 0) 6462 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 6463 else 6464 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 6465 break; 6466 case 1: 6467 case 2: 6468 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6469 ring = &adev->gfx.compute_ring[i]; 6470 
/* Per-queue interrupt is supported for MEC starting from VI. 6471 * The interrupt can only be enabled/disabled per pipe instead 6472 * of per queue. 6473 */ 6474 if ((ring->me == me_id) && 6475 (ring->pipe == pipe_id) && 6476 (ring->queue == queue_id)) 6477 amdgpu_fence_process(ring); 6478 } 6479 break; 6480 } 6481 } 6482 6483 return 0; 6484 } 6485 6486 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 6487 struct amdgpu_irq_src *source, 6488 unsigned int type, 6489 enum amdgpu_interrupt_state state) 6490 { 6491 u32 cp_int_cntl_reg, cp_int_cntl; 6492 int i, j; 6493 6494 switch (state) { 6495 case AMDGPU_IRQ_STATE_DISABLE: 6496 case AMDGPU_IRQ_STATE_ENABLE: 6497 for (i = 0; i < adev->gfx.me.num_me; i++) { 6498 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6499 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6500 6501 if (cp_int_cntl_reg) { 6502 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6503 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6504 PRIV_REG_INT_ENABLE, 6505 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6506 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6507 } 6508 } 6509 } 6510 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6511 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6512 /* MECs start at 1 */ 6513 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6514 6515 if (cp_int_cntl_reg) { 6516 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6517 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6518 PRIV_REG_INT_ENABLE, 6519 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6520 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6521 } 6522 } 6523 } 6524 break; 6525 default: 6526 break; 6527 } 6528 6529 return 0; 6530 } 6531 6532 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev, 6533 struct amdgpu_irq_src *source, 6534 unsigned type, 6535 enum amdgpu_interrupt_state state) 6536 { 6537 u32 cp_int_cntl_reg, cp_int_cntl; 6538 int i, j; 6539 6540 switch (state) { 6541 case AMDGPU_IRQ_STATE_DISABLE: 6542 case AMDGPU_IRQ_STATE_ENABLE: 6543 for (i = 0; i < adev->gfx.me.num_me; i++) { 6544 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6545 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6546 6547 if (cp_int_cntl_reg) { 6548 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6549 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6550 OPCODE_ERROR_INT_ENABLE, 6551 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6552 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6553 } 6554 } 6555 } 6556 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 6557 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 6558 /* MECs start at 1 */ 6559 cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j); 6560 6561 if (cp_int_cntl_reg) { 6562 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6563 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6564 OPCODE_ERROR_INT_ENABLE, 6565 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 6566 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6567 } 6568 } 6569 } 6570 break; 6571 default: 6572 break; 6573 } 6574 return 0; 6575 } 6576 6577 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 6578 struct amdgpu_irq_src *source, 6579 unsigned int type, 6580 enum amdgpu_interrupt_state state) 6581 { 6582 u32 cp_int_cntl_reg, cp_int_cntl; 6583 int i, j; 6584 6585 switch (state) { 6586 case AMDGPU_IRQ_STATE_DISABLE: 6587 case AMDGPU_IRQ_STATE_ENABLE: 6588 for (i = 0; i < adev->gfx.me.num_me; i++) { 6589 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 6590 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j); 6591 6592 if (cp_int_cntl_reg) { 6593 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6594 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6595 PRIV_INSTR_INT_ENABLE, 6596 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6597 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6598 } 6599 } 6600 } 6601 break; 6602 default: 6603 break; 6604 } 6605 6606 return 0; 6607 } 6608 6609 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, 6610 struct amdgpu_iv_entry *entry) 6611 { 6612 u8 me_id, pipe_id, queue_id; 6613 struct amdgpu_ring *ring; 6614 int i; 6615 6616 me_id = (entry->ring_id & 0x0c) >> 2; 6617 pipe_id = (entry->ring_id & 0x03) >> 0; 6618 queue_id = (entry->ring_id & 0x70) >> 4; 6619 6620 if (!adev->gfx.disable_kq) { 6621 switch (me_id) { 6622 case 0: 6623 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6624 ring = &adev->gfx.gfx_ring[i]; 6625 if (ring->me == me_id && ring->pipe == pipe_id && 6626 ring->queue == queue_id) 6627 drm_sched_fault(&ring->sched); 6628 } 6629 break; 6630 case 1: 6631 case 2: 6632 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6633 ring = &adev->gfx.compute_ring[i]; 6634 if (ring->me == me_id && ring->pipe == pipe_id && 6635 ring->queue == queue_id) 6636 drm_sched_fault(&ring->sched); 6637 } 6638 break; 6639 default: 6640 BUG(); 6641 break; 6642 } 6643 } 6644 } 6645 6646 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, 6647 struct amdgpu_irq_src *source, 6648 struct amdgpu_iv_entry *entry) 6649 { 6650 DRM_ERROR("Illegal register access in command stream\n"); 6651 gfx_v11_0_handle_priv_fault(adev, entry); 6652 return 0; 6653 } 6654 6655 static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev, 6656 struct amdgpu_irq_src *source, 6657 struct amdgpu_iv_entry *entry) 6658 { 6659 DRM_ERROR("Illegal opcode in command stream \n"); 6660 gfx_v11_0_handle_priv_fault(adev, entry); 6661 return 0; 6662 } 6663 6664 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, 6665 struct amdgpu_irq_src *source, 6666 struct amdgpu_iv_entry *entry) 6667 { 6668 DRM_ERROR("Illegal instruction in command stream\n"); 6669 gfx_v11_0_handle_priv_fault(adev, entry); 6670 return 0; 6671 } 6672 6673 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev, 6674 struct amdgpu_irq_src *source, 6675 struct amdgpu_iv_entry *entry) 6676 { 6677 if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq) 6678 return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry); 6679 6680 return 0; 6681 } 6682 6683 #if 0 6684 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 6685 struct amdgpu_irq_src *src, 6686 unsigned int type, 6687 enum amdgpu_interrupt_state state) 6688 { 6689 uint32_t tmp, target; 6690 struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring); 6691 6692 target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6693 target += ring->pipe; 6694 6695 switch (type) { 
6696 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 6697 if (state == AMDGPU_IRQ_STATE_DISABLE) { 6698 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6699 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6700 GENERIC2_INT_ENABLE, 0); 6701 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6702 6703 tmp = RREG32_SOC15_IP(GC, target); 6704 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6705 GENERIC2_INT_ENABLE, 0); 6706 WREG32_SOC15_IP(GC, target, tmp); 6707 } else { 6708 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6709 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6710 GENERIC2_INT_ENABLE, 1); 6711 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6712 6713 tmp = RREG32_SOC15_IP(GC, target); 6714 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6715 GENERIC2_INT_ENABLE, 1); 6716 WREG32_SOC15_IP(GC, target, tmp); 6717 } 6718 break; 6719 default: 6720 BUG(); /* kiq only support GENERIC2_INT now */ 6721 break; 6722 } 6723 return 0; 6724 } 6725 #endif 6726 6727 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) 6728 { 6729 const unsigned int gcr_cntl = 6730 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 6731 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 6732 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 6733 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 6734 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 6735 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 6736 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 6737 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 6738 6739 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 6740 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 6741 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 6742 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 6743 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 6744 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 6745 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 6746 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 6747 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 6748 } 6749 6750 static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev) 6751 { 6752 /* Disable the pipe reset until the CPFW fully support it.*/ 6753 dev_warn_once(adev->dev, "The CPFW hasn't support pipe reset yet.\n"); 6754 return false; 6755 } 6756 6757 6758 static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring) 6759 { 6760 struct amdgpu_device *adev = ring->adev; 6761 uint32_t reset_pipe = 0, clean_pipe = 0; 6762 int r; 6763 6764 if (!gfx_v11_pipe_reset_support(adev)) 6765 return -EOPNOTSUPP; 6766 6767 gfx_v11_0_set_safe_mode(adev, 0); 6768 mutex_lock(&adev->srbm_mutex); 6769 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 6770 6771 switch (ring->pipe) { 6772 case 0: 6773 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6774 PFP_PIPE0_RESET, 1); 6775 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6776 ME_PIPE0_RESET, 1); 6777 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6778 PFP_PIPE0_RESET, 0); 6779 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6780 ME_PIPE0_RESET, 0); 6781 break; 6782 case 1: 6783 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6784 PFP_PIPE1_RESET, 1); 6785 reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, 6786 ME_PIPE1_RESET, 1); 6787 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6788 PFP_PIPE1_RESET, 0); 6789 clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, 6790 ME_PIPE1_RESET, 0); 6791 break; 6792 default: 6793 break; 6794 } 6795 6796 WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe); 6797 WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe); 6798 6799 r = (RREG32(SOC15_REG_OFFSET(GC, 0, 
static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reset_pipe = 0, clean_pipe = 0;
	int r;

	if (!gfx_v11_pipe_reset_support(adev))
		return -EOPNOTSUPP;

	gfx_v11_0_set_safe_mode(adev, 0);
	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	switch (ring->pipe) {
	case 0:
		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
					   PFP_PIPE0_RESET, 1);
		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
					   ME_PIPE0_RESET, 1);
		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
					   PFP_PIPE0_RESET, 0);
		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
					   ME_PIPE0_RESET, 0);
		break;
	case 1:
		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
					   PFP_PIPE1_RESET, 1);
		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
					   ME_PIPE1_RESET, 1);
		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
					   PFP_PIPE1_RESET, 0);
		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
					   ME_PIPE1_RESET, 0);
		break;
	default:
		break;
	}

	WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);

	r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
	    RS64_FW_UC_START_ADDR_LO;
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	gfx_v11_0_unset_safe_mode(adev, 0);

	dev_info(adev->dev, "Ring %s pipe reset to the ME firmware start PC: %s\n", ring->name,
		 r == 0 ? "succeeded" : "failed");
	/* FIXME: Sometimes the driver can't cache the ME firmware start PC correctly,
	 * so the pipe reset status relies on the later gfx ring test result.
	 */
	return 0;
}

static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
	if (r) {
		dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
		r = gfx_v11_reset_gfx_pipe(ring);
		if (r)
			return r;
	}

	r = gfx_v11_0_kgq_init_queue(ring, true);
	if (r) {
		dev_err(adev->dev, "failed to init kgq\n");
		return r;
	}

	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kgq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

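/*
 * gfx_v11_0_reset_compute_pipe - reset the compute (MEC) pipe that owns @ring
 *
 * Toggles the MEC pipe reset bit for the ring's pipe, via CP_MEC_RS64_CNTL
 * when the RS64 microcode is in use and via CP_MEC_CNTL (per ME1/ME2 pipe)
 * otherwise, while GFX is held in safe mode. The MEC instruction pointer is
 * then read back to check whether the pipe restarted from the firmware start
 * address. Currently gated off by gfx_v11_pipe_reset_support().
 */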
static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reset_pipe = 0, clean_pipe = 0;
	int r;

	if (!gfx_v11_pipe_reset_support(adev))
		return -EOPNOTSUPP;

	gfx_v11_0_set_safe_mode(adev, 0);
	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	clean_pipe = reset_pipe;

	if (adev->gfx.rs64_enable) {
		switch (ring->pipe) {
		case 0:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE0_RESET, 1);
			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE0_RESET, 0);
			break;
		case 1:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE1_RESET, 1);
			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE1_RESET, 0);
			break;
		case 2:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE2_RESET, 1);
			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE2_RESET, 0);
			break;
		case 3:
			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE3_RESET, 1);
			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
						   MEC_PIPE3_RESET, 0);
			break;
		default:
			break;
		}
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
		r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
		    RS64_FW_UC_START_ADDR_LO;
	} else {
		if (ring->me == 1) {
			switch (ring->pipe) {
			case 0:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE0_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE0_RESET, 0);
				break;
			case 1:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE1_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE1_RESET, 0);
				break;
			case 2:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE2_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE2_RESET, 0);
				break;
			case 3:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE3_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME1_PIPE3_RESET, 0);
				break;
			default:
				break;
			}
			/* mec1 fw pc: CP_MEC1_INSTR_PNTR */
		} else {
			switch (ring->pipe) {
			case 0:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE0_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE0_RESET, 0);
				break;
			case 1:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE1_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE1_RESET, 0);
				break;
			case 2:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE2_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE2_RESET, 0);
				break;
			case 3:
				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE3_RESET, 1);
				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
							   MEC_ME2_PIPE3_RESET, 0);
				break;
			default:
				break;
			}
			/* mec2 fw pc: CP:CP_MEC2_INSTR_PNTR */
		}
		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
		r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	gfx_v11_0_unset_safe_mode(adev, 0);

	dev_info(adev->dev, "Ring %s pipe reset to the MEC firmware start PC: %s\n", ring->name,
		 r == 0 ? "succeeded" : "failed");
	/* FIXME: Sometimes the driver can't cache the MEC firmware start PC correctly,
	 * so the pipe reset status relies on the compute ring test result.
	 */
	return 0;
}

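/*
 * gfx_v11_0_reset_kcq - reset a kernel compute queue
 *
 * Asks the MES to reset the legacy compute queue first and falls back to a
 * compute pipe reset if that fails, then reinitializes and remaps the queue
 * and runs a ring test to confirm it is usable again. Not supported under
 * SR-IOV.
 */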
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
	if (r) {
		dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
		r = gfx_v11_0_reset_compute_pipe(ring);
		if (r)
			return r;
	}

	r = gfx_v11_0_kcq_init_queue(ring, true);
	if (r) {
		dev_err(adev->dev, "failed to init kcq\n");
		return r;
	}
	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kcq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	for (i = 0; i < reg_count; i++)
		drm_printf(p, "%-50s \t 0x%08x\n",
			   gc_reg_list_11_0[i].reg_name,
			   adev->gfx.ip_dump_core[i]);

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
						drm_printf(p, "%-50s \t 0x%08x\n",
							   "regCP_MEC_ME2_HEADER_DUMP",
							   adev->gfx.ip_dump_compute_queues[index + reg]);
					else
						drm_printf(p, "%-50s \t 0x%08x\n",
							   gc_cp_reg_list_11[reg].reg_name,
							   adev->gfx.ip_dump_compute_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}

	/* print gfx queue registers for all instances */
	if (!adev->gfx.ip_dump_gfx_queues)
		return;

	index = 0;
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.me.num_me,
		   adev->gfx.me.num_pipe_per_me,
		   adev->gfx.me.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_gfx_queue_reg_list_11[reg].reg_name,
						   adev->gfx.ip_dump_gfx_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}
}

static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	amdgpu_gfx_off_ctrl(adev, false);
	for (i = 0; i < reg_count; i++)
		adev->gfx.ip_dump_core[i] =
RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i])); 7086 amdgpu_gfx_off_ctrl(adev, true); 7087 7088 /* dump compute queue registers for all instances */ 7089 if (!adev->gfx.ip_dump_compute_queues) 7090 return; 7091 7092 reg_count = ARRAY_SIZE(gc_cp_reg_list_11); 7093 amdgpu_gfx_off_ctrl(adev, false); 7094 mutex_lock(&adev->srbm_mutex); 7095 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 7096 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 7097 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 7098 /* ME0 is for GFX so start from 1 for CP */ 7099 soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); 7100 for (reg = 0; reg < reg_count; reg++) { 7101 if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP) 7102 adev->gfx.ip_dump_compute_queues[index + reg] = 7103 RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC_ME2_HEADER_DUMP)); 7104 else 7105 adev->gfx.ip_dump_compute_queues[index + reg] = 7106 RREG32(SOC15_REG_ENTRY_OFFSET( 7107 gc_cp_reg_list_11[reg])); 7108 } 7109 index += reg_count; 7110 } 7111 } 7112 } 7113 soc21_grbm_select(adev, 0, 0, 0, 0); 7114 mutex_unlock(&adev->srbm_mutex); 7115 amdgpu_gfx_off_ctrl(adev, true); 7116 7117 /* dump gfx queue registers for all instances */ 7118 if (!adev->gfx.ip_dump_gfx_queues) 7119 return; 7120 7121 index = 0; 7122 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11); 7123 amdgpu_gfx_off_ctrl(adev, false); 7124 mutex_lock(&adev->srbm_mutex); 7125 for (i = 0; i < adev->gfx.me.num_me; i++) { 7126 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) { 7127 for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) { 7128 soc21_grbm_select(adev, i, j, k, 0); 7129 7130 for (reg = 0; reg < reg_count; reg++) { 7131 adev->gfx.ip_dump_gfx_queues[index + reg] = 7132 RREG32(SOC15_REG_ENTRY_OFFSET( 7133 gc_gfx_queue_reg_list_11[reg])); 7134 } 7135 index += reg_count; 7136 } 7137 } 7138 } 7139 soc21_grbm_select(adev, 0, 0, 0, 0); 7140 mutex_unlock(&adev->srbm_mutex); 7141 amdgpu_gfx_off_ctrl(adev, true); 7142 } 7143 7144 static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 7145 { 7146 /* Emit the cleaner shader */ 7147 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 7148 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 7149 } 7150 7151 static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring) 7152 { 7153 amdgpu_gfx_profile_ring_begin_use(ring); 7154 7155 amdgpu_gfx_enforce_isolation_ring_begin_use(ring); 7156 } 7157 7158 static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring) 7159 { 7160 amdgpu_gfx_profile_ring_end_use(ring); 7161 7162 amdgpu_gfx_enforce_isolation_ring_end_use(ring); 7163 } 7164 7165 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { 7166 .name = "gfx_v11_0", 7167 .early_init = gfx_v11_0_early_init, 7168 .late_init = gfx_v11_0_late_init, 7169 .sw_init = gfx_v11_0_sw_init, 7170 .sw_fini = gfx_v11_0_sw_fini, 7171 .hw_init = gfx_v11_0_hw_init, 7172 .hw_fini = gfx_v11_0_hw_fini, 7173 .suspend = gfx_v11_0_suspend, 7174 .resume = gfx_v11_0_resume, 7175 .is_idle = gfx_v11_0_is_idle, 7176 .wait_for_idle = gfx_v11_0_wait_for_idle, 7177 .soft_reset = gfx_v11_0_soft_reset, 7178 .check_soft_reset = gfx_v11_0_check_soft_reset, 7179 .post_soft_reset = gfx_v11_0_post_soft_reset, 7180 .set_clockgating_state = gfx_v11_0_set_clockgating_state, 7181 .set_powergating_state = gfx_v11_0_set_powergating_state, 7182 .get_clockgating_state = gfx_v11_0_get_clockgating_state, 7183 .dump_ip_state = gfx_v11_ip_dump, 7184 .print_ip_state = gfx_v11_ip_print, 7185 }; 7186 7187 static 
const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 7188 .type = AMDGPU_RING_TYPE_GFX, 7189 .align_mask = 0xff, 7190 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 7191 .support_64bit_ptrs = true, 7192 .secure_submission_supported = true, 7193 .get_rptr = gfx_v11_0_ring_get_rptr_gfx, 7194 .get_wptr = gfx_v11_0_ring_get_wptr_gfx, 7195 .set_wptr = gfx_v11_0_ring_set_wptr_gfx, 7196 .emit_frame_size = /* totally 247 maximum if 16 IBs */ 7197 5 + /* update_spm_vmid */ 7198 5 + /* COND_EXEC */ 7199 22 + /* SET_Q_PREEMPTION_MODE */ 7200 7 + /* PIPELINE_SYNC */ 7201 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 7202 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 7203 4 + /* VM_FLUSH */ 7204 8 + /* FENCE for VM_FLUSH */ 7205 20 + /* GDS switch */ 7206 5 + /* COND_EXEC */ 7207 7 + /* HDP_flush */ 7208 4 + /* VGT_flush */ 7209 31 + /* DE_META */ 7210 3 + /* CNTX_CTRL */ 7211 5 + /* HDP_INVL */ 7212 22 + /* SET_Q_PREEMPTION_MODE */ 7213 8 + 8 + /* FENCE x2 */ 7214 8 + /* gfx_v11_0_emit_mem_sync */ 7215 2, /* gfx_v11_0_ring_emit_cleaner_shader */ 7216 .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ 7217 .emit_ib = gfx_v11_0_ring_emit_ib_gfx, 7218 .emit_fence = gfx_v11_0_ring_emit_fence, 7219 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 7220 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 7221 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 7222 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 7223 .test_ring = gfx_v11_0_ring_test_ring, 7224 .test_ib = gfx_v11_0_ring_test_ib, 7225 .insert_nop = gfx_v11_ring_insert_nop, 7226 .pad_ib = amdgpu_ring_generic_pad_ib, 7227 .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, 7228 .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow, 7229 .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec, 7230 .preempt_ib = gfx_v11_0_ring_preempt_ib, 7231 .emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl, 7232 .emit_wreg = gfx_v11_0_ring_emit_wreg, 7233 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 7234 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 7235 .soft_recovery = gfx_v11_0_ring_soft_recovery, 7236 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 7237 .reset = gfx_v11_0_reset_kgq, 7238 .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, 7239 .begin_use = gfx_v11_0_ring_begin_use, 7240 .end_use = gfx_v11_0_ring_end_use, 7241 }; 7242 7243 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { 7244 .type = AMDGPU_RING_TYPE_COMPUTE, 7245 .align_mask = 0xff, 7246 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 7247 .support_64bit_ptrs = true, 7248 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 7249 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 7250 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 7251 .emit_frame_size = 7252 5 + /* update_spm_vmid */ 7253 20 + /* gfx_v11_0_ring_emit_gds_switch */ 7254 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 7255 5 + /* hdp invalidate */ 7256 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 7257 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 7258 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 7259 2 + /* gfx_v11_0_ring_emit_vm_flush */ 7260 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ 7261 8 + /* gfx_v11_0_emit_mem_sync */ 7262 2, /* gfx_v11_0_ring_emit_cleaner_shader */ 7263 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 7264 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 7265 .emit_fence = gfx_v11_0_ring_emit_fence, 7266 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 7267 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 7268 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 7269 .emit_hdp_flush = 
gfx_v11_0_ring_emit_hdp_flush, 7270 .test_ring = gfx_v11_0_ring_test_ring, 7271 .test_ib = gfx_v11_0_ring_test_ib, 7272 .insert_nop = gfx_v11_ring_insert_nop, 7273 .pad_ib = amdgpu_ring_generic_pad_ib, 7274 .emit_wreg = gfx_v11_0_ring_emit_wreg, 7275 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 7276 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 7277 .soft_recovery = gfx_v11_0_ring_soft_recovery, 7278 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 7279 .reset = gfx_v11_0_reset_kcq, 7280 .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, 7281 .begin_use = gfx_v11_0_ring_begin_use, 7282 .end_use = gfx_v11_0_ring_end_use, 7283 }; 7284 7285 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { 7286 .type = AMDGPU_RING_TYPE_KIQ, 7287 .align_mask = 0xff, 7288 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 7289 .support_64bit_ptrs = true, 7290 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 7291 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 7292 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 7293 .emit_frame_size = 7294 20 + /* gfx_v11_0_ring_emit_gds_switch */ 7295 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 7296 5 + /*hdp invalidate */ 7297 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 7298 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 7299 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 7300 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 7301 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 7302 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 7303 .emit_fence = gfx_v11_0_ring_emit_fence_kiq, 7304 .test_ring = gfx_v11_0_ring_test_ring, 7305 .test_ib = gfx_v11_0_ring_test_ib, 7306 .insert_nop = amdgpu_ring_insert_nop, 7307 .pad_ib = amdgpu_ring_generic_pad_ib, 7308 .emit_rreg = gfx_v11_0_ring_emit_rreg, 7309 .emit_wreg = gfx_v11_0_ring_emit_wreg, 7310 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 7311 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 7312 }; 7313 7314 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) 7315 { 7316 int i; 7317 7318 adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq; 7319 7320 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 7321 adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; 7322 7323 for (i = 0; i < adev->gfx.num_compute_rings; i++) 7324 adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute; 7325 } 7326 7327 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = { 7328 .set = gfx_v11_0_set_eop_interrupt_state, 7329 .process = gfx_v11_0_eop_irq, 7330 }; 7331 7332 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = { 7333 .set = gfx_v11_0_set_priv_reg_fault_state, 7334 .process = gfx_v11_0_priv_reg_irq, 7335 }; 7336 7337 static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = { 7338 .set = gfx_v11_0_set_bad_op_fault_state, 7339 .process = gfx_v11_0_bad_op_irq, 7340 }; 7341 7342 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { 7343 .set = gfx_v11_0_set_priv_inst_fault_state, 7344 .process = gfx_v11_0_priv_inst_irq, 7345 }; 7346 7347 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = { 7348 .process = gfx_v11_0_rlc_gc_fed_irq, 7349 }; 7350 7351 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) 7352 { 7353 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 7354 adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs; 7355 7356 adev->gfx.priv_reg_irq.num_types = 1; 7357 adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs; 7358 7359 adev->gfx.bad_op_irq.num_types = 1; 7360 
adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs; 7361 7362 adev->gfx.priv_inst_irq.num_types = 1; 7363 adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; 7364 7365 adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */ 7366 adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs; 7367 7368 } 7369 7370 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) 7371 { 7372 if (adev->flags & AMD_IS_APU) 7373 adev->gfx.imu.mode = MISSION_MODE; 7374 else 7375 adev->gfx.imu.mode = DEBUG_MODE; 7376 7377 adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs; 7378 } 7379 7380 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev) 7381 { 7382 adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs; 7383 } 7384 7385 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev) 7386 { 7387 unsigned total_cu = adev->gfx.config.max_cu_per_sh * 7388 adev->gfx.config.max_sh_per_se * 7389 adev->gfx.config.max_shader_engines; 7390 7391 adev->gds.gds_size = 0x1000; 7392 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; 7393 adev->gds.gws_size = 64; 7394 adev->gds.oa_size = 16; 7395 } 7396 7397 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev) 7398 { 7399 /* set gfx eng mqd */ 7400 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 7401 sizeof(struct v11_gfx_mqd); 7402 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 7403 gfx_v11_0_gfx_mqd_init; 7404 /* set compute eng mqd */ 7405 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 7406 sizeof(struct v11_compute_mqd); 7407 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 7408 gfx_v11_0_compute_mqd_init; 7409 } 7410 7411 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 7412 u32 bitmap) 7413 { 7414 u32 data; 7415 7416 if (!bitmap) 7417 return; 7418 7419 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 7420 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 7421 7422 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 7423 } 7424 7425 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 7426 { 7427 u32 data, wgp_bitmask; 7428 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 7429 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 7430 7431 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 7432 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 7433 7434 wgp_bitmask = 7435 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 7436 7437 return (~data) & wgp_bitmask; 7438 } 7439 7440 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 7441 { 7442 u32 wgp_idx, wgp_active_bitmap; 7443 u32 cu_bitmap_per_wgp, cu_active_bitmap; 7444 7445 wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev); 7446 cu_active_bitmap = 0; 7447 7448 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 7449 /* if there is one WGP enabled, it means 2 CUs will be enabled */ 7450 cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 7451 if (wgp_active_bitmap & (1 << wgp_idx)) 7452 cu_active_bitmap |= cu_bitmap_per_wgp; 7453 } 7454 7455 return cu_active_bitmap; 7456 } 7457 7458 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 7459 struct amdgpu_cu_info *cu_info) 7460 { 7461 int i, j, k, counter, active_cu_number = 0; 7462 u32 mask, bitmap; 7463 unsigned disable_masks[8 * 2]; 7464 7465 if (!adev || !cu_info) 7466 return -EINVAL; 7467 7468 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 7469 7470 mutex_lock(&adev->grbm_idx_mutex); 7471 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 7472 for (j = 0; j < 
adev->gfx.config.max_sh_per_se; j++) { 7473 bitmap = i * adev->gfx.config.max_sh_per_se + j; 7474 if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1)) 7475 continue; 7476 mask = 1; 7477 counter = 0; 7478 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0); 7479 if (i < 8 && j < 2) 7480 gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( 7481 adev, disable_masks[i * 2 + j]); 7482 bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev); 7483 7484 /** 7485 * GFX11 could support more than 4 SEs, while the bitmap 7486 * in cu_info struct is 4x4 and ioctl interface struct 7487 * drm_amdgpu_info_device should keep stable. 7488 * So we use last two columns of bitmap to store cu mask for 7489 * SEs 4 to 7, the layout of the bitmap is as below: 7490 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 7491 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 7492 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 7493 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 7494 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 7495 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 7496 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 7497 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 7498 */ 7499 cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap; 7500 7501 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 7502 if (bitmap & mask) 7503 counter++; 7504 7505 mask <<= 1; 7506 } 7507 active_cu_number += counter; 7508 } 7509 } 7510 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 7511 mutex_unlock(&adev->grbm_idx_mutex); 7512 7513 cu_info->number = active_cu_number; 7514 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 7515 7516 return 0; 7517 } 7518 7519 const struct amdgpu_ip_block_version gfx_v11_0_ip_block = 7520 { 7521 .type = AMD_IP_BLOCK_TYPE_GFX, 7522 .major = 11, 7523 .minor = 0, 7524 .rev = 0, 7525 .funcs = &gfx_v11_0_ip_funcs, 7526 }; 7527